Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/logs/monitoring-2-0.log Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 + create_infra monitoring-2-0-15594 + local ns=monitoring-2-0-15594 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.PB6foUoCz4 ++ mktemp + local LAST_ERR=/tmp/tmp.fig97yADjB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PB6foUoCz4 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.fig97yADjB + rm /tmp/tmp.PB6foUoCz4 /tmp/tmp.fig97yADjB + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.UZpr9Htr10 ++ mktemp + local LAST_ERR=/tmp/tmp.GGXXVgyinz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UZpr9Htr10 + cat /tmp/tmp.GGXXVgyinz + rm /tmp/tmp.UZpr9Htr10 /tmp/tmp.GGXXVgyinz + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't 
have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.JL9l5umrIk ++ mktemp + local LAST_ERR=/tmp/tmp.3T4murJ2jD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JL9l5umrIk + cat /tmp/tmp.3T4murJ2jD + rm /tmp/tmp.JL9l5umrIk /tmp/tmp.3T4murJ2jD + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.gBl8vTMg0t ++ mktemp + local LAST_ERR=/tmp/tmp.g3ExWdzr5S + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gBl8vTMg0t + cat /tmp/tmp.g3ExWdzr5S + rm /tmp/tmp.gBl8vTMg0t /tmp/tmp.g3ExWdzr5S + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.YkHUWDT35O ++ mktemp + local LAST_ERR=/tmp/tmp.J9vSEO25LY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YkHUWDT35O clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.J9vSEO25LY + rm /tmp/tmp.YkHUWDT35O /tmp/tmp.J9vSEO25LY + return 0 + check_crd_for_deletion PR-2276-96813772 + local git_tag=PR-2276-96813772 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2276-96813772/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed s/---//g ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.O6noG0inxF +++ mktemp ++ local LAST_ERR=/tmp/tmp.P0dPYfVcHJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.O6noG0inxF ++ cat /tmp/tmp.P0dPYfVcHJ Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get 
crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.O6noG0inxF ++ cat /tmp/tmp.P0dPYfVcHJ Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.O6noG0inxF ++ cat /tmp/tmp.P0dPYfVcHJ Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.O6noG0inxF ++ cat /tmp/tmp.P0dPYfVcHJ Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.O6noG0inxF /tmp/tmp.P0dPYfVcHJ ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp + awk '{print$1}' + local LAST_OUT=/tmp/tmp.Q7RK6FJT9Q ++ mktemp + xargs kubectl delete ns + local LAST_ERR=/tmp/tmp.q7RLekcsAk + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ mktemp + for i in $(seq 0 2) + set +e + kubectl get ns + local 
LAST_OUT=/tmp/tmp.7rQdgjWkVX ++ mktemp + local LAST_ERR=/tmp/tmp.Q3jhS2lYb8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Q7RK6FJT9Q + cat /tmp/tmp.q7RLekcsAk + rm /tmp/tmp.Q7RK6FJT9Q /tmp/tmp.q7RLekcsAk + return 0 namespace "cert-manager" deleted namespace "monitoring-2-0-26778" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7rQdgjWkVX namespace "psmdb-operator" deleted + cat /tmp/tmp.Q3jhS2lYb8 + rm /tmp/tmp.7rQdgjWkVX /tmp/tmp.Q3jhS2lYb8 + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.2IG8bYaWn0 ++ mktemp + local LAST_ERR=/tmp/tmp.ZzrpVFuN7w + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2IG8bYaWn0 + cat /tmp/tmp.ZzrpVFuN7w + rm /tmp/tmp.2IG8bYaWn0 /tmp/tmp.ZzrpVFuN7w + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.d2ciWqgX1A ++ mktemp + local LAST_ERR=/tmp/tmp.Tlkm0KpjJ4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.d2ciWqgX1A namespace/psmdb-operator created + cat /tmp/tmp.Tlkm0KpjJ4 + rm /tmp/tmp.d2ciWqgX1A /tmp/tmp.Tlkm0KpjJ4 + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.RmF0DOaphI +++ mktemp ++ local LAST_ERR=/tmp/tmp.r6xsbgTzA0 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RmF0DOaphI ++ cat /tmp/tmp.r6xsbgTzA0 ++ rm /tmp/tmp.RmF0DOaphI /tmp/tmp.r6xsbgTzA0 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2276-96813772-6-cluster5 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Htp1gKYof0 ++ mktemp + local LAST_ERR=/tmp/tmp.LEJFtC3uqb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2276-96813772-6-cluster5 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Htp1gKYof0 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2276-96813772-6-cluster5" modified. 
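Note: the kubectl_bin wrapper whose expansion repeats throughout this trace can be summarized by the minimal sketch below. The retry count, the sleep 0/4/8 backoff, and the temp-file capture are inferred from the xtrace output above; the body is a reconstruction under those assumptions, not the e2e suite's verbatim helper.

kubectl_bin() {
    # Run kubectl with the given args, capturing stdout/stderr to mktemp
    # files and retrying up to three times with a growing sleep between
    # attempts (sleep 0, 4, 8 is what the trace above shows).
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" -eq 0 ]; then
            break
        fi
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        sleep $((timeout * i))
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR"
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}

Capturing output to files and cat-ing them after each attempt is why every command in this log is followed by the cat and rm of two /tmp/tmp.* paths.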
+ cat /tmp/tmp.LEJFtC3uqb + rm /tmp/tmp.Htp1gKYof0 /tmp/tmp.LEJFtC3uqb + return 0 + deploy_operator + desc 'start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2276-96813772' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2276-96813772 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.nu4DUWkCQH ++ mktemp + local LAST_ERR=/tmp/tmp.bkpoIv483o + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nu4DUWkCQH customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.bkpoIv483o + rm /tmp/tmp.nu4DUWkCQH /tmp/tmp.bkpoIv483o + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.UAJVMlC4VK ++ mktemp + local LAST_ERR=/tmp/tmp.61Ioayjvhz + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UAJVMlC4VK clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.61Ioayjvhz + rm /tmp/tmp.UAJVMlC4VK /tmp/tmp.61Ioayjvhz + return 0 + yq eval ' (.spec.template.spec.containers[].image = "docker.io/perconalab/percona-server-mongodb-operator:PR-2276-96813772") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Iux4cX9YA7 ++ mktemp + local LAST_ERR=/tmp/tmp.9ANsRoMwm2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Iux4cX9YA7 deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.9ANsRoMwm2 + rm /tmp/tmp.Iux4cX9YA7 /tmp/tmp.9ANsRoMwm2 + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.FlZ0aLDMrG +++ mktemp ++ local LAST_ERR=/tmp/tmp.D0xkd5DPjd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FlZ0aLDMrG ++ cat /tmp/tmp.D0xkd5DPjd ++ rm /tmp/tmp.FlZ0aLDMrG /tmp/tmp.D0xkd5DPjd ++ return 0 + wait_operator_pod percona-server-mongodb-operator-74b4cf489c-lb8r7 + local pod=percona-server-mongodb-operator-74b4cf489c-lb8r7 + set +o xtrace waiting for pod/percona-server-mongodb-operator-74b4cf489c-lb8r7 to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.1uh2KzVIPW +++ mktemp ++ local LAST_ERR=/tmp/tmp.BFOnn6jJvS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1uh2KzVIPW ++ cat /tmp/tmp.BFOnn6jJvS ++ rm /tmp/tmp.1uh2KzVIPW /tmp/tmp.BFOnn6jJvS ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-74b4cf489c-lb8r7 ++ mktemp + local LAST_OUT=/tmp/tmp.I7As7uBJ49 ++ mktemp + local LAST_ERR=/tmp/tmp.Csu35KT3a5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-74b4cf489c-lb8r7 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.I7As7uBJ49 + cat /tmp/tmp.Csu35KT3a5 + rm /tmp/tmp.I7As7uBJ49 /tmp/tmp.Csu35KT3a5 + return 0 2026-03-26T09:23:20.256Z INFO setup Manager starting up {"gitCommit": "96813772af244cf39192467fc2e61f90dca8c3cb", "gitBranch": "PR-2276-96813772", "buildTime": "", "goVersion": "go1.25.8", "os": "linux", "arch": "amd64"} + create_namespace monitoring-2-0-15594 + local namespace=monitoring-2-0-15594 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk 
'{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces monitoring-2-0-15594' ++ mktemp + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces monitoring-2-0-15594 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace monitoring-2-0-15594 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.qYK0OeWIjH ++ mktemp + local LAST_OUT=/tmp/tmp.2Hfeb74Qhk ++ mktemp + local LAST_ERR=/tmp/tmp.VaDFWf76Dr + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.Vgu3bfdhZO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace monitoring-2-0-15594 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qYK0OeWIjH + cat /tmp/tmp.VaDFWf76Dr + rm /tmp/tmp.qYK0OeWIjH /tmp/tmp.VaDFWf76Dr + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2Hfeb74Qhk + cat /tmp/tmp.Vgu3bfdhZO + rm /tmp/tmp.2Hfeb74Qhk /tmp/tmp.Vgu3bfdhZO + return 0 + kubectl_bin wait --for=delete namespace monitoring-2-0-15594 ++ mktemp + local LAST_OUT=/tmp/tmp.RWzBYew0Cu ++ mktemp + local LAST_ERR=/tmp/tmp.77WZ3klHcu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace monitoring-2-0-15594 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RWzBYew0Cu + cat /tmp/tmp.77WZ3klHcu + rm /tmp/tmp.RWzBYew0Cu /tmp/tmp.77WZ3klHcu + return 0 + desc 'create namespace monitoring-2-0-15594' + set +o xtrace ----------------------------------------------------------------------------------- create namespace monitoring-2-0-15594 ----------------------------------------------------------------------------------- + kubectl_bin create namespace monitoring-2-0-15594 ++ mktemp + local 
LAST_OUT=/tmp/tmp.yfFcZPGm1f ++ mktemp + local LAST_ERR=/tmp/tmp.lN57PfiSMT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace monitoring-2-0-15594 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yfFcZPGm1f namespace/monitoring-2-0-15594 created + cat /tmp/tmp.lN57PfiSMT + rm /tmp/tmp.yfFcZPGm1f /tmp/tmp.lN57PfiSMT + return 0 + set_kube_ctx monitoring-2-0-15594 + local namespace=monitoring-2-0-15594 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.LnETkjdi38 +++ mktemp ++ local LAST_ERR=/tmp/tmp.7gH58dCUFI ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.LnETkjdi38 ++ cat /tmp/tmp.7gH58dCUFI ++ rm /tmp/tmp.LnETkjdi38 /tmp/tmp.7gH58dCUFI ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2276-96813772-6-cluster5 --namespace=monitoring-2-0-15594 ++ mktemp + local LAST_OUT=/tmp/tmp.wFJzgyqc9A ++ mktemp + local LAST_ERR=/tmp/tmp.zFFeT632vt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2276-96813772-6-cluster5 --namespace=monitoring-2-0-15594 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wFJzgyqc9A Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2276-96813772-6-cluster5" modified. + cat /tmp/tmp.zFFeT632vt + rm /tmp/tmp.wFJzgyqc9A /tmp/tmp.zFFeT632vt + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.alI7krWAps ++ mktemp + local LAST_ERR=/tmp/tmp.wZLf21bptw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.alI7krWAps namespace/cert-manager created + cat /tmp/tmp.wZLf21bptw + rm /tmp/tmp.alI7krWAps /tmp/tmp.wZLf21bptw + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.wPWBZsL1AR ++ mktemp + local LAST_ERR=/tmp/tmp.Kf7H6bCHqL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wPWBZsL1AR namespace/cert-manager labeled + cat /tmp/tmp.Kf7H6bCHqL + rm /tmp/tmp.wPWBZsL1AR /tmp/tmp.Kf7H6bCHqL + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.u2obJ7Uvgv ++ mktemp + local LAST_ERR=/tmp/tmp.KVoYG1mxDU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.u2obJ7Uvgv namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged 
customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured 
validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.KVoYG1mxDU Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.u2obJ7Uvgv /tmp/tmp.KVoYG1mxDU + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.OzpYTcPj4W ++ mktemp + local LAST_ERR=/tmp/tmp.hWyaSJhTc1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OzpYTcPj4W pod/cert-manager-559d798845-g2l4h condition met pod/cert-manager-cainjector-64958d9c7c-m9qdj condition met pod/cert-manager-webhook-7fb6f99b56-wf2pk condition met + cat /tmp/tmp.hWyaSJhTc1 + rm /tmp/tmp.OzpYTcPj4W /tmp/tmp.hWyaSJhTc1 + return 0 + sleep 120 + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + deploy_pmm_server + helm uninstall monitoring Error: uninstall: Release not loaded: monitoring: release: not found + : + helm repo remove stable "stable" has been removed from your repositories + helm repo add stable https://charts.helm.sh/stable "stable" has been added to your repositories + [[ -n '' ]] + retry 10 60 helm install monitoring --set imageTag=dev-latest --set imageRepo=docker.io/perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz + local max=10 + local delay=60 + shift 2 + local n=1 + helm install monitoring --set imageTag=dev-latest --set imageRepo=docker.io/perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz NAME: monitoring LAST DEPLOYED: Thu Mar 26 09:26:24 2026 NAMESPACE: monitoring-2-0-15594 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster: endpoint: https://monitoring-service.monitoring-2-0-15594.svc.cluster.local:443 login: admin password: admin + sleep 40 + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.ylqMvCLGfK ++ mktemp + local LAST_ERR=/tmp/tmp.nAiWR3bpG4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ylqMvCLGfK + cat /tmp/tmp.nAiWR3bpG4 + rm /tmp/tmp.ylqMvCLGfK /tmp/tmp.nAiWR3bpG4 + return 0 + cluster=monitoring + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/conf/secrets.yml ++ 
mktemp + local LAST_OUT=/tmp/tmp.mMOiwSnEru ++ mktemp + local LAST_ERR=/tmp/tmp.TZj02MvoT1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mMOiwSnEru secret/some-users created secret/some-users unchanged + cat /tmp/tmp.TZj02MvoT1 + rm /tmp/tmp.mMOiwSnEru /tmp/tmp.TZj02MvoT1 + return 0 + yq '.spec.template.spec.volumes[0].secret.secretName="monitoring-ssl"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/conf/client_with_tls.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.QE9er71XtF ++ mktemp + local LAST_ERR=/tmp/tmp.JZZYqCIt5l + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QE9er71XtF deployment.apps/psmdb-client created + cat /tmp/tmp.JZZYqCIt5l + rm /tmp/tmp.QE9er71XtF /tmp/tmp.JZZYqCIt5l + return 0 + sleep 90 + desc 'create first PSMDB cluster monitoring' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster monitoring ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2276-96813772"' + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + /usr/sbin/sed -e s/NAME_SPACE/monitoring-2-0-15594/g + yq eval '.spec.upgradeOptions.apply="Never"' + local LAST_OUT=/tmp/tmp.DZZZS3G4bR ++ mktemp + local LAST_ERR=/tmp/tmp.c4cgpaL2IT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DZZZS3G4bR perconaservermongodb.psmdb.percona.com/monitoring created + cat /tmp/tmp.c4cgpaL2IT + rm /tmp/tmp.DZZZS3G4bR /tmp/tmp.c4cgpaL2IT + return 0 + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready......................OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready.............OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 
'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Kc7QaoTGa8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.YGDUTRSYxK ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Kc7QaoTGa8 ++ cat /tmp/tmp.YGDUTRSYxK ++ rm /tmp/tmp.Kc7QaoTGa8 /tmp/tmp.YGDUTRSYxK ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready............OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0uokymPF22 +++ mktemp ++ local LAST_ERR=/tmp/tmp.erlPwr9a67 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.0uokymPF22 ++ cat /tmp/tmp.erlPwr9a67 ++ rm /tmp/tmp.0uokymPF22 /tmp/tmp.erlPwr9a67 ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.q1OtM8zWsx +++ mktemp ++ local LAST_ERR=/tmp/tmp.nd7RQRjdzb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.q1OtM8zWsx ++ cat /tmp/tmp.nd7RQRjdzb ++ rm /tmp/tmp.q1OtM8zWsx /tmp/tmp.nd7RQRjdzb ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.................... + desc 'check if pmm-client container is not enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container is not enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-rs0 -no-pmm + local resource=statefulset/monitoring-rs0 + local postfix=-no-pmm + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml + local new_result=/tmp/tmp.Xji4JvHybv/statefulset_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. 
| select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-15594", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.85MoZObDeY ++ mktemp + local LAST_ERR=/tmp/tmp.z6aiIKfXB5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.85MoZObDeY + cat /tmp/tmp.z6aiIKfXB5 + rm /tmp/tmp.85MoZObDeY /tmp/tmp.z6aiIKfXB5 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.Xji4JvHybv/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.Xji4JvHybv/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.Xji4JvHybv/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml /tmp/tmp.Xji4JvHybv/statefulset_monitoring-rs0.yml + log 'compare_kubectl: statefulset/monitoring-rs0 OK' + set +o xtrace [2026-03-26T09:31:20+0000] compare_kubectl: statefulset/monitoring-rs0 OK + sleep 10 + custom_port=27019 + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-15594 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-15594 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin 
get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Z7RzUXCYvC +++ mktemp ++ local LAST_ERR=/tmp/tmp.78E359Awqr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Z7RzUXCYvC ++ cat /tmp/tmp.78E359Awqr ++ rm /tmp/tmp.Z7RzUXCYvC /tmp/tmp.78E359Awqr ++ return 0 + local client_container=psmdb-client-699f458f75-j6z7n + kubectl_bin exec psmdb-client-699f458f75-j6z7n -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-15594.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.yqmt3ltpZj ++ mktemp + local LAST_ERR=/tmp/tmp.uXhuqcf3wW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-699f458f75-j6z7n -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-15594.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yqmt3ltpZj Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-15594.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2026-03-26T09:31:33.246Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("ee1cbaa6-4a8e-4d03-afbc-3947b5eb5402") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.uXhuqcf3wW + rm /tmp/tmp.yqmt3ltpZj /tmp/tmp.uXhuqcf3wW + return 0 + run_mongos 'sh.enableSharding("myApp")' clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-15594 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=sh.enableSharding("myApp")' + local uri=clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-15594 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.P3PLy8bjKS +++ mktemp ++ local LAST_ERR=/tmp/tmp.mrsv6tlW5v ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.P3PLy8bjKS ++ cat /tmp/tmp.mrsv6tlW5v ++ rm /tmp/tmp.P3PLy8bjKS /tmp/tmp.mrsv6tlW5v ++ return 0 + local client_container=psmdb-client-699f458f75-j6z7n + kubectl_bin exec psmdb-client-699f458f75-j6z7n -- 
bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-15594.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.fsd0ef1evk ++ mktemp + local LAST_ERR=/tmp/tmp.vjb4emIfWG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-699f458f75-j6z7n -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-15594.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fsd0ef1evk Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-15594.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2026-03-26T09:31:35.909Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("2c585224-9659-4ec2-9b79-11f58be2ff17") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match { "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1774517496, 1), "signature" : { "hash" : BinData(0,"wmo9L0SCHu+9HOZmXFZiQAcVQhM="), "keyId" : NumberLong("7621494216362819608") } }, "operationTime" : Timestamp(1774517495, 7) } bye + cat /tmp/tmp.vjb4emIfWG + rm /tmp/tmp.fsd0ef1evk /tmp/tmp.vjb4emIfWG + return 0 + insert_data_mongos 100500 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100500 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@monitoring-mongos.monitoring-2-0-15594 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-15594 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.v6dT6KJmrl +++ mktemp ++ local LAST_ERR=/tmp/tmp.5DHaiDznkS ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.v6dT6KJmrl ++ cat /tmp/tmp.5DHaiDznkS ++ rm /tmp/tmp.v6dT6KJmrl /tmp/tmp.5DHaiDznkS ++ return 0 + local client_container=psmdb-client-699f458f75-j6z7n + kubectl_bin exec psmdb-client-699f458f75-j6z7n -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-15594.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.ICV0rLC8Sk ++ mktemp + local 
LAST_ERR=/tmp/tmp.gZcpKbBs2T + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-699f458f75-j6z7n -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-15594.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ICV0rLC8Sk Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-15594.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2026-03-26T09:31:38.613Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("bc4f9998-b0a8-48e9-8216-a8551f264bf9") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.gZcpKbBs2T + rm /tmp/tmp.ICV0rLC8Sk /tmp/tmp.gZcpKbBs2T + return 0 + insert_data_mongos 100600 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100600 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + run_mongos 'use myApp\n db.test.insert({ x: 100600 })' myApp:myPass@monitoring-mongos.monitoring-2-0-15594 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100600 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-15594 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9LApspTIQL +++ mktemp ++ local LAST_ERR=/tmp/tmp.gjpyKk00uw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9LApspTIQL ++ cat /tmp/tmp.gjpyKk00uw ++ rm /tmp/tmp.9LApspTIQL /tmp/tmp.gjpyKk00uw ++ return 0 + local client_container=psmdb-client-699f458f75-j6z7n + kubectl_bin exec psmdb-client-699f458f75-j6z7n -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-15594.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.GbF20nJiVC ++ mktemp + local LAST_ERR=/tmp/tmp.N4JJhGXvfX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-699f458f75-j6z7n -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-15594.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GbF20nJiVC Percona Server for MongoDB 
shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-15594.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2026-03-26T09:31:41.273Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("085ff71f-f76e-4926-9560-01cffe52be19") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.N4JJhGXvfX + rm /tmp/tmp.GbF20nJiVC /tmp/tmp.N4JJhGXvfX + return 0 + insert_data_mongos 100700 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100700 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + run_mongos 'use myApp\n db.test.insert({ x: 100700 })' myApp:myPass@monitoring-mongos.monitoring-2-0-15594 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100700 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-15594 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Qg6ZAVWIC1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.nRfYcW26lb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Qg6ZAVWIC1 ++ cat /tmp/tmp.nRfYcW26lb ++ rm /tmp/tmp.Qg6ZAVWIC1 /tmp/tmp.nRfYcW26lb ++ return 0 + local client_container=psmdb-client-699f458f75-j6z7n + kubectl_bin exec psmdb-client-699f458f75-j6z7n -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-15594.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.bdqjK1zNDd ++ mktemp + local LAST_ERR=/tmp/tmp.laevXUAGAS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-699f458f75-j6z7n -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-15594.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bdqjK1zNDd Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-15594.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2026-03-26T09:31:43.364Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("e9f3d570-9b2e-4342-97fd-8b3f33546161") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp 
WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.laevXUAGAS + rm /tmp/tmp.bdqjK1zNDd /tmp/tmp.laevXUAGAS + return 0 + desc 'add PMM_SERVER_API_KEY for secret some-users' + set +o xtrace ----------------------------------------------------------------------------------- add PMM_SERVER_API_KEY for secret some-users ----------------------------------------------------------------------------------- ++ jq .key +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.lb9q5cCtjK +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.g20Sdn3DaC ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.lb9q5cCtjK ++++ cat /tmp/tmp.g20Sdn3DaC ++++ rm /tmp/tmp.lb9q5cCtjK /tmp/tmp.g20Sdn3DaC ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.W1lxt2sRxo +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.EuxUF7Qp99 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.W1lxt2sRxo ++++ cat /tmp/tmp.EuxUF7Qp99 ++++ rm /tmp/tmp.W1lxt2sRxo /tmp/tmp.EuxUF7Qp99 ++++ return 0 +++ local ip=34.72.232.131 +++ '[' -n 34.72.232.131 -a 34.72.232.131 '!=' null ']' +++ echo 34.72.232.131 +++ return ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@34.72.232.131/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 155 100 119 100 36 285 86 --:--:-- --:--:-- --:--:-- 371 + API_KEY='"eyJrIjoiRmNxTURMQWRBSjNtdGpnaUQxa0YzTkE0N1FzRWxKVUMiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="' + kubectl_bin patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoiRmNxTURMQWRBSjNtdGpnaUQxa0YzTkE0N1FzRWxKVUMiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' ++ mktemp + local LAST_OUT=/tmp/tmp.B7Efjna9fa ++ mktemp + local LAST_ERR=/tmp/tmp.i0yZRYfhzX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoiRmNxTURMQWRBSjNtdGpnaUQxa0YzTkE0N1FzRWxKVUMiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.B7Efjna9fa secret/some-users patched + cat /tmp/tmp.i0yZRYfhzX + rm /tmp/tmp.B7Efjna9fa /tmp/tmp.i0yZRYfhzX + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 
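The PMM_SERVER_API_KEY provisioning above reduces to three commands once the xtrace noise is stripped. A minimal sketch of what the harness does, assuming the monitoring-service LoadBalancer already has an external address and the PMM Server still uses the default admin:admin credentials (both true in this run); ENDPOINT stands in for the resolved address:

  # Resolve the LoadBalancer endpoint: prefer .hostname, fall back to .ip.
  ENDPOINT=$(kubectl get service/monitoring-service -o json \
    | jq -r '.status.loadBalancer.ingress[0].hostname // .status.loadBalancer.ingress[0].ip')

  # Create a Grafana Admin API key on the PMM Server (-k: self-signed cert).
  API_KEY=$(curl -s -k -X POST -H 'Content-Type: application/json' \
    -d '{"name":"operator", "role": "Admin"}' \
    "https://admin:admin@${ENDPOINT}/graph/api/auth/keys" | jq -r .key)

  # Hand the key to the operator by patching the users Secret in place.
  kubectl patch secret some-users --type merge \
    --patch "{\"stringData\": {\"PMM_SERVER_API_KEY\": \"${API_KEY}\"}}"

The harness keeps the surrounding quotes from jq .key and splices them in as the JSON string delimiters of the patch; the sketch uses jq -r and adds explicit quotes, which yields the same patch.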
]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nfCLkmFLvC +++ mktemp ++ local LAST_ERR=/tmp/tmp.XDGGu17s9B ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nfCLkmFLvC ++ cat /tmp/tmp.XDGGu17s9B ++ rm /tmp/tmp.nfCLkmFLvC /tmp/tmp.XDGGu17s9B ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready.OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.u9u31Wy5vA +++ mktemp ++ local LAST_ERR=/tmp/tmp.vKYRDWdG7e ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.u9u31Wy5vA ++ cat /tmp/tmp.vKYRDWdG7e ++ rm /tmp/tmp.u9u31Wy5vA /tmp/tmp.vKYRDWdG7e ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uL2aVV3sAx +++ mktemp ++ local LAST_ERR=/tmp/tmp.U0VkDG19Xj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uL2aVV3sAx ++ cat /tmp/tmp.U0VkDG19Xj ++ rm /tmp/tmp.uL2aVV3sAx /tmp/tmp.U0VkDG19Xj ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readiness........................................................................................................................................................... + sleep 90 + desc 'check if pmm-client container enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-rs0 + local resource=statefulset/monitoring-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml + local new_result=/tmp/tmp.Xji4JvHybv/statefulset_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(..
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-15594", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.WiSHDP5ml4 ++ mktemp + local LAST_ERR=/tmp/tmp.MoW8yHGT7u + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WiSHDP5ml4 + cat /tmp/tmp.MoW8yHGT7u + rm /tmp/tmp.WiSHDP5ml4 /tmp/tmp.MoW8yHGT7u + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.Xji4JvHybv/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.Xji4JvHybv/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.Xji4JvHybv/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml /tmp/tmp.Xji4JvHybv/statefulset_monitoring-rs0.yml + log 'compare_kubectl: statefulset/monitoring-rs0 OK' + set +o xtrace [2026-03-26T09:38:45+0000] compare_kubectl: statefulset/monitoring-rs0 OK + compare_kubectl service/monitoring-rs0 + local resource=service/monitoring-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml + local new_result=/tmp/tmp.Xji4JvHybv/service_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-15594", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.IivDe24WmB ++ mktemp + local LAST_ERR=/tmp/tmp.RHmxSC0E7n + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IivDe24WmB + cat /tmp/tmp.RHmxSC0E7n + rm /tmp/tmp.IivDe24WmB /tmp/tmp.RHmxSC0E7n + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.Xji4JvHybv/service_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.Xji4JvHybv/service_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.Xji4JvHybv/service_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml /tmp/tmp.Xji4JvHybv/service_monitoring-rs0.yml + log 'compare_kubectl: service/monitoring-rs0 OK' + set +o xtrace [2026-03-26T09:38:46+0000] compare_kubectl: service/monitoring-rs0 OK + compare_kubectl service/monitoring-mongos + local resource=service/monitoring-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml + local new_result=/tmp/tmp.Xji4JvHybv/service_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. 
| select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-15594", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.KyN9cbcEPx ++ mktemp + local LAST_ERR=/tmp/tmp.I5zhqYQTWk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KyN9cbcEPx + cat /tmp/tmp.I5zhqYQTWk + rm /tmp/tmp.KyN9cbcEPx /tmp/tmp.I5zhqYQTWk + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.Xji4JvHybv/service_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.Xji4JvHybv/service_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.Xji4JvHybv/service_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml /tmp/tmp.Xji4JvHybv/service_monitoring-mongos.yml + log 'compare_kubectl: service/monitoring-mongos OK' + set +o xtrace [2026-03-26T09:38:48+0000] compare_kubectl: service/monitoring-mongos OK + compare_kubectl statefulset/monitoring-cfg + local resource=statefulset/monitoring-cfg + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml + local new_result=/tmp/tmp.Xji4JvHybv/statefulset_monitoring-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-cfg ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. 
| select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-15594", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.3bxf0XRE4n ++ mktemp + local LAST_ERR=/tmp/tmp.1v33WJzWGD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3bxf0XRE4n + cat /tmp/tmp.1v33WJzWGD + rm /tmp/tmp.3bxf0XRE4n /tmp/tmp.1v33WJzWGD + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.Xji4JvHybv/statefulset_monitoring-cfg.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.Xji4JvHybv/statefulset_monitoring-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.Xji4JvHybv/statefulset_monitoring-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml /tmp/tmp.Xji4JvHybv/statefulset_monitoring-cfg.yml + log 'compare_kubectl: statefulset/monitoring-cfg OK' + set +o xtrace [2026-03-26T09:38:49+0000] compare_kubectl: statefulset/monitoring-cfg OK + compare_kubectl statefulset/monitoring-mongos + local resource=statefulset/monitoring-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml + local new_result=/tmp/tmp.Xji4JvHybv/statefulset_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. 
| select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-15594", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.bFmG28jkUl ++ mktemp + local LAST_ERR=/tmp/tmp.sijHX8oIDI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.bFmG28jkUl + cat /tmp/tmp.sijHX8oIDI + rm /tmp/tmp.bFmG28jkUl /tmp/tmp.sijHX8oIDI + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.Xji4JvHybv/statefulset_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.Xji4JvHybv/statefulset_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.Xji4JvHybv/statefulset_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml /tmp/tmp.Xji4JvHybv/statefulset_monitoring-mongos.yml + log 'compare_kubectl: statefulset/monitoring-mongos OK' + set +o xtrace [2026-03-26T09:38:50+0000] compare_kubectl: statefulset/monitoring-mongos OK + desc 'check mongod metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongod metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-15594-monitoring-rs0-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-15594-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1774517870 ++ /usr/sbin/date -u +%s + local end=1774517930 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.d3Sp5OQONt ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0Mg3EwC1Uy +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.d3Sp5OQONt +++ cat /tmp/tmp.0Mg3EwC1Uy +++ rm /tmp/tmp.d3Sp5OQONt /tmp/tmp.0Mg3EwC1Uy +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.f2YrqjCMxH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ldXI1hCH1d +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.f2YrqjCMxH +++ cat /tmp/tmp.ldXI1hCH1d +++ rm /tmp/tmp.f2YrqjCMxH /tmp/tmp.ldXI1hCH1d +++ return 0 ++ local ip=34.72.232.131 ++ '[' -n 34.72.232.131 -a 34.72.232.131 '!=' null ']' ++ echo 34.72.232.131 ++ return + local endpoint=34.72.232.131 + curl -s -k 
'https://admin:admin@34.72.232.131/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-15594-monitoring-rs0-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-15594-monitoring-rs0-1%22%7D%29&start=1774517870&end=1774517930&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "1774512281" "1774512281" + get_metric_values mongodb_connections monitoring-2-0-15594-monitoring-rs0-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-15594-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1774517872 ++ /usr/sbin/date -u +%s + local end=1774517932 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.J2euOKu5aI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pflrmvQMYR +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.J2euOKu5aI +++ cat /tmp/tmp.pflrmvQMYR +++ rm /tmp/tmp.J2euOKu5aI /tmp/tmp.pflrmvQMYR +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.dF0PMtjuOE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.sTXmzywXtI +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.dF0PMtjuOE +++ cat /tmp/tmp.sTXmzywXtI +++ rm /tmp/tmp.dF0PMtjuOE /tmp/tmp.sTXmzywXtI +++ return 0 ++ local ip=34.72.232.131 ++ '[' -n 34.72.232.131 -a 34.72.232.131 '!=' null ']' ++ echo 34.72.232.131 ++ return + local endpoint=34.72.232.131 + curl -s -k 'https://admin:admin@34.72.232.131/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-15594-monitoring-rs0-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-15594-monitoring-rs0-1%22%7D%29&start=1774517872&end=1774517932&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "0" "0" + desc 'check mongo config metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongo config metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-15594-monitoring-cfg-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-15594-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1774517875 ++ /usr/sbin/date -u +%s + local end=1774517935 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.ebEtqNQLn2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MDCOrvgpyw +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ 
exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ebEtqNQLn2 +++ cat /tmp/tmp.MDCOrvgpyw +++ rm /tmp/tmp.ebEtqNQLn2 /tmp/tmp.MDCOrvgpyw +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ sed -e 's/^"//; s/"$//;' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5mLC3bP60r ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JIBROKEqTu +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.5mLC3bP60r +++ cat /tmp/tmp.JIBROKEqTu +++ rm /tmp/tmp.5mLC3bP60r /tmp/tmp.JIBROKEqTu +++ return 0 ++ local ip=34.72.232.131 ++ '[' -n 34.72.232.131 -a 34.72.232.131 '!=' null ']' ++ echo 34.72.232.131 ++ return + local endpoint=34.72.232.131 + curl -s -k 'https://admin:admin@34.72.232.131/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-15594-monitoring-cfg-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-15594-monitoring-cfg-1%22%7D%29&start=1774517875&end=1774517935&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "1774517166" "1774517166" + get_metric_values mongodb_connections monitoring-2-0-15594-monitoring-cfg-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-15594-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1774517877 ++ /usr/sbin/date -u +%s + local end=1774517937 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.uXuVCEIvy0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.RPxVS2e1tN +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.uXuVCEIvy0 +++ cat /tmp/tmp.RPxVS2e1tN +++ rm /tmp/tmp.uXuVCEIvy0 /tmp/tmp.RPxVS2e1tN +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.LlxMGt7HcY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.6SalPsVKWg +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.LlxMGt7HcY +++ cat /tmp/tmp.6SalPsVKWg +++ rm /tmp/tmp.LlxMGt7HcY /tmp/tmp.6SalPsVKWg +++ return 0 ++ local ip=34.72.232.131 ++ '[' -n 34.72.232.131 -a 34.72.232.131 '!=' null ']' ++ echo 34.72.232.131 ++ return + local endpoint=34.72.232.131 + curl -s -k 'https://admin:admin@34.72.232.131/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-15594-monitoring-cfg-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-15594-monitoring-cfg-1%22%7D%29&start=1774517877&end=1774517937&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "0" "0" + desc 'check mongos metrics' + set +o xtrace 
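Each get_metric_values call above is a single PromQL range query routed through the Grafana datasource proxy that PMM exposes, followed by a grep that demands at least one numeric sample. A minimal sketch of the same check, assuming ENDPOINT is resolved as in the earlier sketch; the harness URL-encodes the query by hand and ORs two identical selectors, which the sketch collapses into one:

  INSTANCE=monitoring-2-0-15594-monitoring-cfg-1
  START=$(date -u +%s -d '-1 minute')   # one-minute window ending now
  END=$(date -u +%s)

  # Query Prometheus via PMM's Grafana datasource proxy (datasource id 1);
  # jq pulls the sample values, grep insists they are numeric strings.
  curl -s -k -G "https://admin:admin@${ENDPOINT}/graph/api/datasources/proxy/1/api/v1/query_range" \
    --data-urlencode "query=min(node_boot_time_seconds{node_name=~\"${INSTANCE}\"})" \
    --data-urlencode "start=${START}" \
    --data-urlencode "end=${END}" \
    --data-urlencode "step=60" \
    | jq '.data.result[0].values[][1]' \
    | grep '^"[0-9]'

An empty grep result would mean the pmm-client sidecar never shipped that metric for the instance, which is exactly what this phase guards against.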
----------------------------------------------------------------------------------- check mongos metrics ----------------------------------------------------------------------------------- ++ kubectl get pod -l app.kubernetes.io/component=mongos -o 'jsonpath={.items[0].metadata.name}' + MONGOS_POD_NAME=monitoring-mongos-0 + get_metric_values node_boot_time_seconds monitoring-2-0-15594-monitoring-mongos-0 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-15594-monitoring-mongos-0 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1774517881 ++ /usr/sbin/date -u +%s + local end=1774517941 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6uh9TSDBla ++++ mktemp +++ local LAST_ERR=/tmp/tmp.OV4VAGBPBN +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.6uh9TSDBla +++ cat /tmp/tmp.OV4VAGBPBN +++ rm /tmp/tmp.6uh9TSDBla /tmp/tmp.OV4VAGBPBN +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.Irey71vTN4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AWBBxWA2tr +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Irey71vTN4 +++ cat /tmp/tmp.AWBBxWA2tr +++ rm /tmp/tmp.Irey71vTN4 /tmp/tmp.AWBBxWA2tr +++ return 0 ++ local ip=34.72.232.131 ++ '[' -n 34.72.232.131 -a 34.72.232.131 '!=' null ']' ++ echo 34.72.232.131 ++ return + local endpoint=34.72.232.131 + curl -s -k 'https://admin:admin@34.72.232.131/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-15594-monitoring-mongos-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-15594-monitoring-mongos-0%22%7D%29&start=1774517881&end=1774517941&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "1774517166" "1774517166" + sleep 90 + desc 'check QAN data' + set +o xtrace ----------------------------------------------------------------------------------- check QAN data ----------------------------------------------------------------------------------- + get_qan_values mongodb dev-mongod admin:admin + local service_type=mongodb + local environment=dev-mongod + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/sbin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2026-03-25T21:40:34+00:00 ++ /usr/sbin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2026-03-26T09:40:34+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.vQTXHzcPHw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.RoTjPSZm4X +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 
'!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.vQTXHzcPHw +++ cat /tmp/tmp.RoTjPSZm4X +++ rm /tmp/tmp.vQTXHzcPHw /tmp/tmp.RoTjPSZm4X +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.1Zgd4GpWgK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.bw1FwmwPLt +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.1Zgd4GpWgK +++ cat /tmp/tmp.bw1FwmwPLt +++ rm /tmp/tmp.1Zgd4GpWgK /tmp/tmp.bw1FwmwPLt +++ return 0 ++ local ip=34.72.232.131 ++ '[' -n 34.72.232.131 -a 34.72.232.131 '!=' null ']' ++ echo 34.72.232.131 ++ return + endpoint=34.72.232.131 + cat + local response + retry=0 ++ curl -s -k -XPOST -d @payload.json https://admin:admin@34.72.232.131/v0/qan/GetReport ++ jq '.rows[].sparkline' + [[ [ { "time_frame": 360, "timestamp": "2026-03-26T09:40:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-26T09:34:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-26T09:28:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-26T09:22:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-26T09:16:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-26T09:10:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-26T09:04:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2026-03-26T08:58:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2026-03-26T08:52:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-26T08:46:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-26T08:40:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2026-03-26T08:34:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2026-03-26T08:28:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2026-03-26T08:22:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2026-03-26T08:16:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2026-03-26T08:10:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-26T08:04:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-26T07:58:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-26T07:52:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-26T07:46:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-26T07:40:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-26T07:34:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-26T07:28:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-26T07:22:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-26T07:16:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-26T07:10:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-26T07:04:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-26T06:58:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-26T06:52:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-26T06:46:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-26T06:40:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-26T06:34:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-26T06:28:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-26T06:22:00Z" }, { "point": 34, "time_frame": 360, 
"timestamp": "2026-03-26T06:16:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-26T06:10:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-26T06:04:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2026-03-26T05:58:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-26T05:52:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-26T05:46:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2026-03-26T05:40:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-26T05:34:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-26T05:28:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-26T05:22:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-26T05:16:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-26T05:10:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-26T05:04:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-26T04:58:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-26T04:52:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-26T04:46:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-26T04:40:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-26T04:34:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-26T04:28:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-26T04:22:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-26T04:16:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-26T04:10:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-26T04:04:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-26T03:58:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-26T03:52:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2026-03-26T03:46:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-26T03:40:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2026-03-26T03:34:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-26T03:28:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2026-03-26T03:22:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-26T03:16:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-26T03:10:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-26T03:04:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-26T02:58:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-26T02:52:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-26T02:46:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-26T02:40:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-26T02:34:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-26T02:28:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-26T02:22:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-26T02:16:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-26T02:10:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-26T02:04:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-26T01:58:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-26T01:52:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-26T01:46:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-26T01:40:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-26T01:34:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-26T01:28:00Z" }, { "point": 
83, "time_frame": 360, "timestamp": "2026-03-26T01:22:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-26T01:16:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-26T01:10:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-26T01:04:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-26T00:58:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-26T00:52:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-26T00:46:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-26T00:40:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-26T00:34:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-26T00:28:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-26T00:22:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-26T00:16:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-26T00:10:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-26T00:04:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-25T23:58:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-25T23:52:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-25T23:46:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-25T23:40:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-25T23:34:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-25T23:28:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-25T23:22:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-25T23:16:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-25T23:10:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-25T23:04:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-25T22:58:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-25T22:52:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-25T22:46:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-25T22:40:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-25T22:34:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-25T22:28:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-25T22:22:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-25T22:16:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-25T22:10:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-25T22:04:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-25T21:58:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-25T21:52:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-25T21:46:00Z" } ] [ { "time_frame": 360, "timestamp": "2026-03-26T09:40:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-26T09:34:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-26T09:28:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-26T09:22:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-26T09:16:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-26T09:10:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-26T09:04:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2026-03-26T08:58:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2026-03-26T08:52:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-26T08:46:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-26T08:40:00Z" }, { "point": 11, "time_frame": 360, "timestamp": 
"2026-03-26T08:34:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2026-03-26T08:28:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2026-03-26T08:22:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2026-03-26T08:16:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2026-03-26T08:10:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-26T08:04:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-26T07:58:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-26T07:52:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-26T07:46:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-26T07:40:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-26T07:34:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-26T07:28:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-26T07:22:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-26T07:16:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-26T07:10:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-26T07:04:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-26T06:58:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-26T06:52:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-26T06:46:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-26T06:40:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-26T06:34:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-26T06:28:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-26T06:22:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2026-03-26T06:16:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-26T06:10:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-26T06:04:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2026-03-26T05:58:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-26T05:52:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-26T05:46:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2026-03-26T05:40:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-26T05:34:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-26T05:28:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-26T05:22:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-26T05:16:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-26T05:10:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-26T05:04:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-26T04:58:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-26T04:52:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-26T04:46:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-26T04:40:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-26T04:34:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-26T04:28:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-26T04:22:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-26T04:16:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-26T04:10:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-26T04:04:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-26T03:58:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-26T03:52:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2026-03-26T03:46:00Z" }, { "point": 60, 
"time_frame": 360, "timestamp": "2026-03-26T03:40:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2026-03-26T03:34:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-26T03:28:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2026-03-26T03:22:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-26T03:16:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-26T03:10:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-26T03:04:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-26T02:58:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-26T02:52:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-26T02:46:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-26T02:40:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-26T02:34:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-26T02:28:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-26T02:22:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-26T02:16:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-26T02:10:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-26T02:04:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-26T01:58:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-26T01:52:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-26T01:46:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-26T01:40:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-26T01:34:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-26T01:28:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-26T01:22:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-26T01:16:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-26T01:10:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-26T01:04:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-26T00:58:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-26T00:52:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-26T00:46:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-26T00:40:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-26T00:34:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-26T00:28:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-26T00:22:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-26T00:16:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-26T00:10:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-26T00:04:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-25T23:58:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-25T23:52:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-25T23:46:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-25T23:40:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-25T23:34:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-25T23:28:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-25T23:22:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-25T23:16:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-25T23:10:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-25T23:04:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-25T22:58:00Z" }, { "point": 108, "time_frame": 360, "timestamp": 
"2026-03-25T22:52:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-25T22:46:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-25T22:40:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-25T22:34:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-25T22:28:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-25T22:22:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-25T22:16:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-25T22:10:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-25T22:04:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-25T21:58:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-25T21:52:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-25T21:46:00Z" } ] [ { "time_frame": 360, "timestamp": "2026-03-26T09:40:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-26T09:34:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-26T09:28:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-26T09:22:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-26T09:16:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-26T09:10:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-26T09:04:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2026-03-26T08:58:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2026-03-26T08:52:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-26T08:46:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-26T08:40:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2026-03-26T08:34:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2026-03-26T08:28:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2026-03-26T08:22:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2026-03-26T08:16:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2026-03-26T08:10:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-26T08:04:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-26T07:58:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-26T07:52:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-26T07:46:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-26T07:40:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-26T07:34:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-26T07:28:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-26T07:22:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-26T07:16:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-26T07:10:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-26T07:04:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-26T06:58:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-26T06:52:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-26T06:46:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-26T06:40:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-26T06:34:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-26T06:28:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-26T06:22:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2026-03-26T06:16:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-26T06:10:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-26T06:04:00Z" }, { "point": 37, "time_frame": 
360, "timestamp": "2026-03-26T05:58:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-26T05:52:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-26T05:46:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2026-03-26T05:40:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-26T05:34:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-26T05:28:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-26T05:22:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-26T05:16:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-26T05:10:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-26T05:04:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-26T04:58:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-26T04:52:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-26T04:46:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-26T04:40:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-26T04:34:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-26T04:28:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-26T04:22:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-26T04:16:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-26T04:10:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-26T04:04:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-26T03:58:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-26T03:52:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2026-03-26T03:46:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-26T03:40:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2026-03-26T03:34:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-26T03:28:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2026-03-26T03:22:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-26T03:16:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-26T03:10:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-26T03:04:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-26T02:58:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-26T02:52:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-26T02:46:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-26T02:40:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-26T02:34:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-26T02:28:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-26T02:22:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-26T02:16:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-26T02:10:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-26T02:04:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-26T01:58:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-26T01:52:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-26T01:46:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-26T01:40:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-26T01:34:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-26T01:28:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-26T01:22:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-26T01:16:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-26T01:10:00Z" }, { 
"point": 86, "time_frame": 360, "timestamp": "2026-03-26T01:04:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-26T00:58:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-26T00:52:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-26T00:46:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-26T00:40:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-26T00:34:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-26T00:28:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-26T00:22:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-26T00:16:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-26T00:10:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-26T00:04:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-25T23:58:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-25T23:52:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-25T23:46:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-25T23:40:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-25T23:34:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-25T23:28:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-25T23:22:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-25T23:16:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-25T23:10:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-25T23:04:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-25T22:58:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-25T22:52:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-25T22:46:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-25T22:40:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-25T22:34:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-25T22:28:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-25T22:22:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-25T22:16:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-25T22:10:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-25T22:04:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-25T21:58:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-25T21:52:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-25T21:46:00Z" } ] [ { "time_frame": 360, "timestamp": "2026-03-26T09:40:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-26T09:34:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-26T09:28:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-26T09:22:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-26T09:16:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-26T09:10:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-26T09:04:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2026-03-26T08:58:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2026-03-26T08:52:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-26T08:46:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-26T08:40:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2026-03-26T08:34:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2026-03-26T08:28:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2026-03-26T08:22:00Z" }, { "point": 14, "time_frame": 360, "timestamp": 
"2026-03-26T08:16:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2026-03-26T08:10:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-26T08:04:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-26T07:58:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-26T07:52:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-26T07:46:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-26T07:40:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-26T07:34:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-26T07:28:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-26T07:22:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-26T07:16:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-26T07:10:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-26T07:04:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-26T06:58:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-26T06:52:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-26T06:46:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-26T06:40:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-26T06:34:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-26T06:28:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-26T06:22:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2026-03-26T06:16:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-26T06:10:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-26T06:04:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2026-03-26T05:58:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-26T05:52:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-26T05:46:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2026-03-26T05:40:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-26T05:34:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-26T05:28:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-26T05:22:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-26T05:16:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-26T05:10:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-26T05:04:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-26T04:58:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-26T04:52:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-26T04:46:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-26T04:40:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-26T04:34:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-26T04:28:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-26T04:22:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-26T04:16:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-26T04:10:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-26T04:04:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-26T03:58:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-26T03:52:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2026-03-26T03:46:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-26T03:40:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2026-03-26T03:34:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-26T03:28:00Z" }, { "point": 63, 
"time_frame": 360, "timestamp": "2026-03-26T03:22:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-26T03:16:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-26T03:10:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-26T03:04:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-26T02:58:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-26T02:52:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-26T02:46:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-26T02:40:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-26T02:34:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-26T02:28:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-26T02:22:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-26T02:16:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-26T02:10:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-26T02:04:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-26T01:58:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-26T01:52:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-26T01:46:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-26T01:40:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-26T01:34:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-26T01:28:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-26T01:22:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-26T01:16:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-26T01:10:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-26T01:04:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-26T00:58:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-26T00:52:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-26T00:46:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-26T00:40:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-26T00:34:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-26T00:28:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-26T00:22:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-26T00:16:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-26T00:10:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-26T00:04:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-25T23:58:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-25T23:52:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-25T23:46:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-25T23:40:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-25T23:34:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-25T23:28:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-25T23:22:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-25T23:16:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-25T23:10:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-25T23:04:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-25T22:58:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-25T22:52:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-25T22:46:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-25T22:40:00Z" }, { "point": 111, "time_frame": 360, "timestamp": 
"2026-03-25T22:34:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-25T22:28:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-25T22:22:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-25T22:16:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-25T22:10:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-25T22:04:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-25T21:58:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-25T21:52:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-25T21:46:00Z" } ] [ { "time_frame": 360, "timestamp": "2026-03-26T09:40:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-26T09:34:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-26T09:28:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-26T09:22:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-26T09:16:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-26T09:10:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-26T09:04:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2026-03-26T08:58:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2026-03-26T08:52:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-26T08:46:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-26T08:40:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2026-03-26T08:34:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2026-03-26T08:28:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2026-03-26T08:22:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2026-03-26T08:16:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2026-03-26T08:10:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-26T08:04:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-26T07:58:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-26T07:52:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-26T07:46:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-26T07:40:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-26T07:34:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-26T07:28:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-26T07:22:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-26T07:16:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-26T07:10:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-26T07:04:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-26T06:58:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-26T06:52:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-26T06:46:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-26T06:40:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-26T06:34:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-26T06:28:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-26T06:22:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2026-03-26T06:16:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-26T06:10:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-26T06:04:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2026-03-26T05:58:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-26T05:52:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-26T05:46:00Z" }, { "point": 40, "time_frame": 360, 
"timestamp": "2026-03-26T05:40:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-26T05:34:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-26T05:28:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-26T05:22:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-26T05:16:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-26T05:10:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-26T05:04:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-26T04:58:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-26T04:52:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-26T04:46:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-26T04:40:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-26T04:34:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-26T04:28:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-26T04:22:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-26T04:16:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-26T04:10:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-26T04:04:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-26T03:58:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-26T03:52:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2026-03-26T03:46:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-26T03:40:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2026-03-26T03:34:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-26T03:28:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2026-03-26T03:22:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-26T03:16:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-26T03:10:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-26T03:04:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-26T02:58:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-26T02:52:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-26T02:46:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-26T02:40:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-26T02:34:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-26T02:28:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-26T02:22:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-26T02:16:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-26T02:10:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-26T02:04:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-26T01:58:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-26T01:52:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-26T01:46:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-26T01:40:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-26T01:34:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-26T01:28:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-26T01:22:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-26T01:16:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-26T01:10:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-26T01:04:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-26T00:58:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-26T00:52:00Z" }, { "point": 
89, "time_frame": 360, "timestamp": "2026-03-26T00:46:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-26T00:40:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-26T00:34:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-26T00:28:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-26T00:22:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-26T00:16:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-26T00:10:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-26T00:04:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-25T23:58:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-25T23:52:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-25T23:46:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-25T23:40:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-25T23:34:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-25T23:28:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-25T23:22:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-25T23:16:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-25T23:10:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-25T23:04:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-25T22:58:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-25T22:52:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-25T22:46:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-25T22:40:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-25T22:34:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-25T22:28:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-25T22:22:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-25T22:16:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-25T22:10:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-25T22:04:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-25T21:58:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-25T21:52:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-25T21:46:00Z" } ] != \n\u\l\l ]] + rm -f payload.json + get_qan_values mongodb dev-mongos admin:admin + local service_type=mongodb + local environment=dev-mongos + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/sbin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2026-03-25T21:40:36+00:00 ++ /usr/sbin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2026-03-26T09:40:36+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.JJMsL973PI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UqIAkGzyvG +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.JJMsL973PI +++ cat /tmp/tmp.UqIAkGzyvG +++ rm /tmp/tmp.JJMsL973PI /tmp/tmp.UqIAkGzyvG +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ITdfIVEBpa ++++ mktemp +++ 
+ get_qan_values mongodb dev-mongos admin:admin
+ local service_type=mongodb
+ local environment=dev-mongos
+ local user_pass=admin:admin
+ local start
+ local end
+ local endpoint
++ /usr/sbin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z
+ start=2026-03-25T21:40:36+00:00
++ /usr/sbin/date -u +%Y-%m-%dT%H:%M:%S%:z
+ end=2026-03-26T09:40:36+00:00
++ get_service_endpoint monitoring-service
++ local service=monitoring-service
+++ kubectl_bin get service/monitoring-service -o json
+++ jq '.status.loadBalancer.ingress[].hostname'
+++ sed -e 's/^"//; s/"$//;'
[... kubectl_bin retry scaffolding elided here and throughout this excerpt: mktemp'd LAST_OUT/LAST_ERR files, up to three attempts via seq 0 2, exit_status check, cat and rm of the temp files, return 0 ...]
++ local hostname=null
++ '[' -n null -a null '!=' null ']'
+++ kubectl_bin get service/monitoring-service -o json
+++ jq '.status.loadBalancer.ingress[].ip'
+++ sed -e 's/^"//; s/"$//;'
++ local ip=34.72.232.131
++ '[' -n 34.72.232.131 -a 34.72.232.131 '!=' null ']'
++ echo 34.72.232.131
++ return
+ endpoint=34.72.232.131
"2026-03-26T00:52:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-26T00:46:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-26T00:40:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-26T00:34:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-26T00:28:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-26T00:22:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-26T00:16:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-26T00:10:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-26T00:04:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-25T23:58:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-25T23:52:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-25T23:46:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-25T23:40:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-25T23:34:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-25T23:28:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-25T23:22:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-25T23:16:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-25T23:10:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-25T23:04:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-25T22:58:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-25T22:52:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-25T22:46:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-25T22:40:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-25T22:34:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-25T22:28:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-25T22:22:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-25T22:16:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-25T22:10:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-25T22:04:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-25T21:58:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-25T21:52:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-25T21:46:00Z" } ] != \n\u\l\l ]] + rm -f payload.json + nodeList=($(get_node_id_from_pmm)) ++ get_node_id_from_pmm ++ nodeList=() ++ local -a nodeList +++ kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Uflj3yswmd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.hZRqknWu2A +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Uflj3yswmd +++ cat /tmp/tmp.hZRqknWu2A +++ rm /tmp/tmp.Uflj3yswmd /tmp/tmp.hZRqknWu2A +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-15594 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local 
+ nodeList=($(get_node_id_from_pmm))
++ get_node_id_from_pmm
++ nodeList=()
++ local -a nodeList
+++ kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name
++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name')
++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id'))
+++ kubectl_bin exec -n monitoring-2-0-15594 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json
+++ jq -r .pmm_agent_status.node_id
[... the same exec/jq lookup repeats for monitoring-cfg-1, monitoring-cfg-2, monitoring-mongos-0, monitoring-mongos-1, monitoring-mongos-2, monitoring-rs0-0, monitoring-rs0-1 and monitoring-rs0-2 ...]
++ echo /node_id/af27ce91-7c65-4a58-bb6c-d52acdee5442 /node_id/527d8fd0-19c7-4a68-9001-f3637e34be89 /node_id/803c4de4-2536-4e08-bd31-335767cedb3e /node_id/d89128b3-bcb4-483a-b2f3-57bf8da9490b /node_id/ee8f4fcf-f17f-4349-9025-e43a76ee7d28 /node_id/7ea6a3d1-758a-4322-a428-d0468496bfe3 /node_id/56721bad-1ece-4098-9b8c-708f1b69c24e /node_id/bf4c7566-c097-4e22-92b6-62114c1fa6ab /node_id/34094b21-3cb4-4345-865e-1227b85588d8
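The node-registration check happens in two passes: the loop above collects the pmm-agent node ID from every database pod, and the loop that follows confirms each ID is present in the PMM server's inventory. A condensed sketch of both passes, assuming ${namespace} is set and reusing the admin:admin credentials and ENDPOINT from this run:

# Pass 1: node ID reported by each pod's pmm-agent (pods filtered as in the trace).
nodeList=()
for instance in $(kubectl get pods --no-headers \
        -l app.kubernetes.io/name=percona-server-mongodb \
        --output=custom-columns='NAME:.metadata.name'); do
    nodeList+=("$(kubectl exec -n "${namespace}" "${instance}" -c pmm-client -- \
        pmm-admin status --json | jq -r '.pmm_agent_status.node_id')")
done
# Pass 2: every collected ID must appear in the server-side inventory.
for node_id in "${nodeList[@]}"; do
    kubectl exec -n "${namespace}" monitoring-0 -- \
        pmm-admin --server-url="https://admin:admin@${ENDPOINT}/" --server-insecure-tls \
        inventory list nodes --node-type=CONTAINER_NODE \
        | grep -q "${node_id}" || echo "node ${node_id} not registered" >&2
done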
+ nodeList_from_pmm=($(does_node_id_exists "${nodeList[@]}"))
++ does_node_id_exists /node_id/af27ce91-7c65-4a58-bb6c-d52acdee5442 /node_id/527d8fd0-19c7-4a68-9001-f3637e34be89 /node_id/803c4de4-2536-4e08-bd31-335767cedb3e /node_id/d89128b3-bcb4-483a-b2f3-57bf8da9490b /node_id/ee8f4fcf-f17f-4349-9025-e43a76ee7d28 /node_id/7ea6a3d1-758a-4322-a428-d0468496bfe3 /node_id/56721bad-1ece-4098-9b8c-708f1b69c24e /node_id/bf4c7566-c097-4e22-92b6-62114c1fa6ab /node_id/34094b21-3cb4-4345-865e-1227b85588d8
++ nodeList=('/node_id/af27ce91-7c65-4a58-bb6c-d52acdee5442' '/node_id/527d8fd0-19c7-4a68-9001-f3637e34be89' '/node_id/803c4de4-2536-4e08-bd31-335767cedb3e' '/node_id/d89128b3-bcb4-483a-b2f3-57bf8da9490b' '/node_id/ee8f4fcf-f17f-4349-9025-e43a76ee7d28' '/node_id/7ea6a3d1-758a-4322-a428-d0468496bfe3' '/node_id/56721bad-1ece-4098-9b8c-708f1b69c24e' '/node_id/bf4c7566-c097-4e22-92b6-62114c1fa6ab' '/node_id/34094b21-3cb4-4345-865e-1227b85588d8')
++ local -a nodeList
++ nodeList_from_pmm=()
++ local -a nodeList_from_pmm
++ for node_id in "${nodeList[@]}"
++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}'))
+++ grep /node_id/af27ce91-7c65-4a58-bb6c-d52acdee5442
+++ awk '{print $4}'
++++ get_pmm_service_ip monitoring-service
++++ local service=monitoring-service
++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}'
++++ grep -q NotFound
++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}'
++++ grep -E -q 'hostname|ip'
++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
+++ kubectl_bin exec -n monitoring-2-0-15594 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.72.232.131/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE
[... the same get_pmm_service_ip lookup and inventory listing repeat for each remaining node_id (527d8fd0-..., 803c4de4-..., d89128b3-..., ee8f4fcf-..., 7ea6a3d1-..., 56721bad-..., bf4c7566-..., 34094b21-...); the final iteration's hostname lookup ends this excerpt ...]
++++ cat /tmp/tmp.q52NqeX7Ax ++++ cat
/tmp/tmp.TbnZhBzeDz ++++ rm /tmp/tmp.q52NqeX7Ax /tmp/tmp.TbnZhBzeDz ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-15594 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.72.232.131/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pizAFUPVln ++++ mktemp +++ local LAST_ERR=/tmp/tmp.6jVhsVdQ7l +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-15594 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.72.232.131/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.pizAFUPVln +++ cat /tmp/tmp.6jVhsVdQ7l +++ rm /tmp/tmp.pizAFUPVln /tmp/tmp.6jVhsVdQ7l +++ return 0 ++ echo /node_id/af27ce91-7c65-4a58-bb6c-d52acdee5442 /node_id/527d8fd0-19c7-4a68-9001-f3637e34be89 /node_id/803c4de4-2536-4e08-bd31-335767cedb3e /node_id/d89128b3-bcb4-483a-b2f3-57bf8da9490b /node_id/ee8f4fcf-f17f-4349-9025-e43a76ee7d28 /node_id/7ea6a3d1-758a-4322-a428-d0468496bfe3 /node_id/56721bad-1ece-4098-9b8c-708f1b69c24e /node_id/bf4c7566-c097-4e22-92b6-62114c1fa6ab /node_id/34094b21-3cb4-4345-865e-1227b85588d8 + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/af27ce91-7c65-4a58-bb6c-d52acdee5442 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/527d8fd0-19c7-4a68-9001-f3637e34be89 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/803c4de4-2536-4e08-bd31-335767cedb3e ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/d89128b3-bcb4-483a-b2f3-57bf8da9490b ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/ee8f4fcf-f17f-4349-9025-e43a76ee7d28 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/7ea6a3d1-758a-4322-a428-d0468496bfe3 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/56721bad-1ece-4098-9b8c-708f1b69c24e ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/bf4c7566-c097-4e22-92b6-62114c1fa6ab ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/34094b21-3cb4-4345-865e-1227b85588d8 ']' + kubectl_bin patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.tk13tw6lQ6 ++ mktemp + local LAST_ERR=/tmp/tmp.02NwmQirQ2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tk13tw6lQ6 perconaservermongodb.psmdb.percona.com/monitoring patched + cat /tmp/tmp.02NwmQirQ2 + rm /tmp/tmp.tk13tw6lQ6 /tmp/tmp.02NwmQirQ2 + return 0 + wait_for_delete pod/monitoring-mongos-0 + local res=pod/monitoring-mongos-0 + local wait_time=60 + set +o xtrace waiting for pod/monitoring-mongos-0 to be deleted.........................Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found + wait_for_delete pod/monitoring-rs0-0 + local res=pod/monitoring-rs0-0 + local wait_time=60 + set +o xtrace waiting for pod/monitoring-rs0-0 to be deleted...........Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server 
(NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found + wait_for_delete pod/monitoring-cfg-0 + local res=pod/monitoring-cfg-0 + local wait_time=60 + set +o xtrace waiting for pod/monitoring-cfg-0 to be deleted........Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found + desc 'check if services are not deleted' + set +o xtrace ----------------------------------------------------------------------------------- check if services are not deleted ----------------------------------------------------------------------------------- + kubectl_bin get svc monitoring-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.iEZWYQU09Z ++ mktemp + local LAST_ERR=/tmp/tmp.YDtfOpx3yj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get svc monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.iEZWYQU09Z NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-rs0 ClusterIP None 27019/TCP 15m + cat /tmp/tmp.YDtfOpx3yj + rm /tmp/tmp.iEZWYQU09Z /tmp/tmp.YDtfOpx3yj + return 0 + kubectl_bin get svc monitoring-cfg ++ mktemp + local LAST_OUT=/tmp/tmp.np7CNZrMmD ++ mktemp + local LAST_ERR=/tmp/tmp.VOQcivDLV2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get svc monitoring-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.np7CNZrMmD NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-cfg ClusterIP None 27019/TCP 15m + cat /tmp/tmp.VOQcivDLV2 + rm /tmp/tmp.np7CNZrMmD /tmp/tmp.VOQcivDLV2 + return 0 + kubectl_bin get svc monitoring-mongos ++ mktemp + local LAST_OUT=/tmp/tmp.jYkT0LIMcC ++ mktemp + local LAST_ERR=/tmp/tmp.5VHLuaZtJK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get svc monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jYkT0LIMcC NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-mongos ClusterIP 34.118.226.1 27019/TCP 15m + cat /tmp/tmp.5VHLuaZtJK + rm /tmp/tmp.jYkT0LIMcC /tmp/tmp.5VHLuaZtJK + return 0 + does_node_id_exists_in_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/af27ce91-7c65-4a58-bb6c-d52acdee5442 /node_id/527d8fd0-19c7-4a68-9001-f3637e34be89 /node_id/803c4de4-2536-4e08-bd31-335767cedb3e /node_id/d89128b3-bcb4-483a-b2f3-57bf8da9490b /node_id/ee8f4fcf-f17f-4349-9025-e43a76ee7d28 /node_id/7ea6a3d1-758a-4322-a428-d0468496bfe3 /node_id/56721bad-1ece-4098-9b8c-708f1b69c24e /node_id/bf4c7566-c097-4e22-92b6-62114c1fa6ab /node_id/34094b21-3cb4-4345-865e-1227b85588d8 ++ nodeList=('/node_id/af27ce91-7c65-4a58-bb6c-d52acdee5442' '/node_id/527d8fd0-19c7-4a68-9001-f3637e34be89' '/node_id/803c4de4-2536-4e08-bd31-335767cedb3e' '/node_id/d89128b3-bcb4-483a-b2f3-57bf8da9490b' '/node_id/ee8f4fcf-f17f-4349-9025-e43a76ee7d28' '/node_id/7ea6a3d1-758a-4322-a428-d0468496bfe3' '/node_id/56721bad-1ece-4098-9b8c-708f1b69c24e' '/node_id/bf4c7566-c097-4e22-92b6-62114c1fa6ab' '/node_id/34094b21-3cb4-4345-865e-1227b85588d8') ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ 
--server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/af27ce91-7c65-4a58-bb6c-d52acdee5442 ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service +++ awk '{print $4}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.undX5nIXGE +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.R4oDQ97MTE ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.undX5nIXGE ++++ cat /tmp/tmp.R4oDQ97MTE ++++ rm /tmp/tmp.undX5nIXGE /tmp/tmp.R4oDQ97MTE ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.h7ODXSyapH +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.ZUlo1GQyLg ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.h7ODXSyapH ++++ cat /tmp/tmp.ZUlo1GQyLg ++++ rm /tmp/tmp.h7ODXSyapH /tmp/tmp.ZUlo1GQyLg ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-15594 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.72.232.131/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.r3Y2nkj2HY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.jiS8SgG9PY +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-15594 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.72.232.131/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.r3Y2nkj2HY +++ cat /tmp/tmp.jiS8SgG9PY +++ rm /tmp/tmp.r3Y2nkj2HY /tmp/tmp.jiS8SgG9PY +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/527d8fd0-19c7-4a68-9001-f3637e34be89 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.pFruQDkFlI +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.pyen86ypp9 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a 
-n 1 ']' ++++ break ++++ cat /tmp/tmp.pFruQDkFlI ++++ cat /tmp/tmp.pyen86ypp9 ++++ rm /tmp/tmp.pFruQDkFlI /tmp/tmp.pyen86ypp9 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.FZzkewx5eR +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.hASLOkzsp4 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.FZzkewx5eR ++++ cat /tmp/tmp.hASLOkzsp4 ++++ rm /tmp/tmp.FZzkewx5eR /tmp/tmp.hASLOkzsp4 ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-15594 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.72.232.131/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.bw76JtvVrE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KSpbZyKpGq +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-15594 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.72.232.131/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.bw76JtvVrE +++ cat /tmp/tmp.KSpbZyKpGq +++ rm /tmp/tmp.bw76JtvVrE /tmp/tmp.KSpbZyKpGq +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/803c4de4-2536-4e08-bd31-335767cedb3e +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.DAe2NwWPc4 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.pMg82atdxs ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.DAe2NwWPc4 ++++ cat /tmp/tmp.pMg82atdxs ++++ rm /tmp/tmp.DAe2NwWPc4 /tmp/tmp.pMg82atdxs ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Grai2V4vFj +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.7SaGhT7ANX ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.Grai2V4vFj ++++ cat /tmp/tmp.7SaGhT7ANX ++++ rm /tmp/tmp.Grai2V4vFj /tmp/tmp.7SaGhT7ANX ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-15594 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.72.232.131/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ 
mktemp +++ local LAST_OUT=/tmp/tmp.ILsHginEdP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.muzrK5C6Xc +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-15594 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.72.232.131/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ILsHginEdP +++ cat /tmp/tmp.muzrK5C6Xc +++ rm /tmp/tmp.ILsHginEdP /tmp/tmp.muzrK5C6Xc +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/d89128b3-bcb4-483a-b2f3-57bf8da9490b +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.2JfZNcwGQU +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.9FIGIhwOOg ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.2JfZNcwGQU ++++ cat /tmp/tmp.9FIGIhwOOg ++++ rm /tmp/tmp.2JfZNcwGQU /tmp/tmp.9FIGIhwOOg ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.sf66LYYWlm +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.LX5vL7CYoD ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.sf66LYYWlm ++++ cat /tmp/tmp.LX5vL7CYoD ++++ rm /tmp/tmp.sf66LYYWlm /tmp/tmp.LX5vL7CYoD ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-15594 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.72.232.131/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.yDLoV7Gtia ++++ mktemp +++ local LAST_ERR=/tmp/tmp.SUcXdS88Ig +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-15594 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.72.232.131/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.yDLoV7Gtia +++ cat /tmp/tmp.SUcXdS88Ig +++ rm /tmp/tmp.yDLoV7Gtia /tmp/tmp.SUcXdS88Ig +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/ee8f4fcf-f17f-4349-9025-e43a76ee7d28 +++ awk '{print 
$4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.oZi3i8tl2a +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.nD8nOmZVxn ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.oZi3i8tl2a ++++ cat /tmp/tmp.nD8nOmZVxn ++++ rm /tmp/tmp.oZi3i8tl2a /tmp/tmp.nD8nOmZVxn ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.iZBOEjvkJg +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.5cbJXsXJcF ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.iZBOEjvkJg ++++ cat /tmp/tmp.5cbJXsXJcF ++++ rm /tmp/tmp.iZBOEjvkJg /tmp/tmp.5cbJXsXJcF ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-15594 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.72.232.131/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.hFQSd43Rjp ++++ mktemp +++ local LAST_ERR=/tmp/tmp.thn7Fcou1P +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-15594 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.72.232.131/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.hFQSd43Rjp +++ cat /tmp/tmp.thn7Fcou1P +++ rm /tmp/tmp.hFQSd43Rjp /tmp/tmp.thn7Fcou1P +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/7ea6a3d1-758a-4322-a428-d0468496bfe3 ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service +++ awk '{print $4}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.QQylDmttI7 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.giwCrf4Pyk ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.QQylDmttI7 ++++ cat /tmp/tmp.giwCrf4Pyk ++++ rm /tmp/tmp.QQylDmttI7 /tmp/tmp.giwCrf4Pyk ++++ return 0 ++++ kubectl_bin get 
service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.tXuYXM4BzT +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.cA0xHjYcRX ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.tXuYXM4BzT ++++ cat /tmp/tmp.cA0xHjYcRX ++++ rm /tmp/tmp.tXuYXM4BzT /tmp/tmp.cA0xHjYcRX ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-15594 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.72.232.131/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.FwHWCfukBn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2oCqLQAXOT +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-15594 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.72.232.131/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.FwHWCfukBn +++ cat /tmp/tmp.2oCqLQAXOT +++ rm /tmp/tmp.FwHWCfukBn /tmp/tmp.2oCqLQAXOT +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service +++ grep /node_id/56721bad-1ece-4098-9b8c-708f1b69c24e +++ awk '{print $4}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ViQSZAPezc +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Q47zRLxdEx ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.ViQSZAPezc ++++ cat /tmp/tmp.Q47zRLxdEx ++++ rm /tmp/tmp.ViQSZAPezc /tmp/tmp.Q47zRLxdEx ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.7tAkyr9vZv +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.1LlF1xJFjP ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.7tAkyr9vZv ++++ cat /tmp/tmp.1LlF1xJFjP ++++ rm /tmp/tmp.7tAkyr9vZv /tmp/tmp.1LlF1xJFjP ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-15594 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.72.232.131/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.bnVUNNn1OH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.D1FwundCK5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for 
i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-15594 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.72.232.131/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.bnVUNNn1OH +++ cat /tmp/tmp.D1FwundCK5 +++ rm /tmp/tmp.bnVUNNn1OH /tmp/tmp.D1FwundCK5 +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/bf4c7566-c097-4e22-92b6-62114c1fa6ab ++++ get_pmm_service_ip monitoring-service +++ awk '{print $4}' ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.yTsLzFTotr +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.OfQ1P4dkey ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.yTsLzFTotr ++++ cat /tmp/tmp.OfQ1P4dkey ++++ rm /tmp/tmp.yTsLzFTotr /tmp/tmp.OfQ1P4dkey ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ZixMtugAq5 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.6ws7wWASqj ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.ZixMtugAq5 ++++ cat /tmp/tmp.6ws7wWASqj ++++ rm /tmp/tmp.ZixMtugAq5 /tmp/tmp.6ws7wWASqj ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-15594 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.72.232.131/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.JfWXzcFNrr ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MPuSKe3rYF +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-15594 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.72.232.131/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.JfWXzcFNrr +++ cat /tmp/tmp.MPuSKe3rYF +++ rm /tmp/tmp.JfWXzcFNrr /tmp/tmp.MPuSKe3rYF +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) ++++ get_pmm_service_ip monitoring-service +++ grep /node_id/34094b21-3cb4-4345-865e-1227b85588d8 ++++ local service=monitoring-service +++ awk '{print $4}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 
'jsonpath={.spec.type}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.iyjlr0hvic +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.EzJVqmVdjo ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.iyjlr0hvic ++++ cat /tmp/tmp.EzJVqmVdjo ++++ rm /tmp/tmp.iyjlr0hvic /tmp/tmp.EzJVqmVdjo ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.js7rDnZsaX +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.1gU9fykKeC ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.js7rDnZsaX ++++ cat /tmp/tmp.1gU9fykKeC ++++ rm /tmp/tmp.js7rDnZsaX /tmp/tmp.1gU9fykKeC ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-15594 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.72.232.131/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QjDmqI59he ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yfBEH9CfA1 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-15594 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.72.232.131/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.QjDmqI59he +++ cat /tmp/tmp.yfBEH9CfA1 +++ rm /tmp/tmp.QjDmqI59he /tmp/tmp.yfBEH9CfA1 +++ return 0 ++ echo + desc 'check customClusterName for pmm' + set +o xtrace ----------------------------------------------------------------------------------- check customClusterName for pmm ----------------------------------------------------------------------------------- + custom_name=custom-cluster-name + kubectl_bin patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":false}, {"op":"add","path":"/spec/pmm/customClusterName","value":custom-cluster-name}]' ++ mktemp + local LAST_OUT=/tmp/tmp.KU1YkmFdvA ++ mktemp + local LAST_ERR=/tmp/tmp.9MQtApPnOA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":false}, {"op":"add","path":"/spec/pmm/customClusterName","value":custom-cluster-name}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KU1YkmFdvA perconaservermongodb.psmdb.percona.com/monitoring patched + cat /tmp/tmp.9MQtApPnOA + rm /tmp/tmp.KU1YkmFdvA /tmp/tmp.9MQtApPnOA + return 0 + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready.........OK + for 
i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready..........OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.u1LwfeTBzr +++ mktemp ++ local LAST_ERR=/tmp/tmp.InZqPLaaLb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.u1LwfeTBzr ++ cat /tmp/tmp.InZqPLaaLb ++ rm /tmp/tmp.u1LwfeTBzr /tmp/tmp.InZqPLaaLb ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready...........OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Orj7Mruxvo +++ mktemp ++ local LAST_ERR=/tmp/tmp.5YBzYYdSeq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Orj7Mruxvo ++ cat /tmp/tmp.5YBzYYdSeq ++ rm /tmp/tmp.Orj7Mruxvo /tmp/tmp.5YBzYYdSeq ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JcKqTAN5Kl +++ mktemp ++ local LAST_ERR=/tmp/tmp.jFupNJcQaG ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JcKqTAN5Kl ++ cat /tmp/tmp.jFupNJcQaG ++ rm /tmp/tmp.JcKqTAN5Kl /tmp/tmp.jFupNJcQaG ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness............. 
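
Almost every step in this trace runs through the same retry wrapper, which is why the mktemp / LAST_OUT / LAST_ERR / seq 0 2 / cat / rm block repeats for each node check above. A minimal sketch of what the two helpers appear to do, reconstructed from the trace alone: the names kubectl_bin and get_pmm_service_ip come from the log, but the bodies are approximations, not the test suite's exact code (the real get_pmm_service_ip also probes .spec.type and whether the ingress exposes an ip or a hostname, as the jsonpath calls above show).

    # kubectl_bin: run kubectl up to three times, capturing stdout/stderr in
    # temp files; stop on the first attempt that exits 0, then replay the
    # captured output and clean up -- matching the trace pattern above.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=1 timeout=4 i
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" -eq 0 ]; then
                break
            fi
            # assumption: backoff scaled by attempt number, consistent with
            # the "sleep 0" the trace shows after a first failed attempt
            sleep $((timeout * i))
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm -f "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }

    # get_pmm_service_ip: resolve the LoadBalancer address of a service,
    # preferring the ingress IP and falling back to the ingress hostname
    # (simplified relative to the traced helper).
    get_pmm_service_ip() {
        local service=$1 ip hostname
        ip=$(kubectl_bin get "service/${service}" -o 'jsonpath={.status.loadBalancer.ingress[].ip}')
        hostname=$(kubectl_bin get "service/${service}" -o 'jsonpath={.status.loadBalancer.ingress[].hostname}')
        echo "${ip:-$hostname}"
    }

In this run the helper resolves to 34.72.232.131, which is why every pmm-admin --server-url in the trace points at https://admin:admin@34.72.232.131/.
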
++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.O1ciCtHFrH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.BZvZc2uOsn +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.O1ciCtHFrH +++ cat /tmp/tmp.BZvZc2uOsn +++ rm /tmp/tmp.O1ciCtHFrH /tmp/tmp.BZvZc2uOsn +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.tWeVtpFV2u ++++ mktemp +++ local LAST_ERR=/tmp/tmp.D3fo2fjINI +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.tWeVtpFV2u +++ cat /tmp/tmp.D3fo2fjINI +++ rm /tmp/tmp.tWeVtpFV2u /tmp/tmp.D3fo2fjINI +++ return 0 ++ local ip=34.72.232.131 ++ '[' -n 34.72.232.131 -a 34.72.232.131 '!=' null ']' ++ echo 34.72.232.131 ++ return + curl -s -k -d '{"service_type":"MONGODB_SERVICE"}' https://admin:admin@34.72.232.131/v1/inventory/Services/List + check_custom_cluster_name monitoring-2-0-15594-monitoring-mongos-0 /tmp/tmp.Xji4JvHybv/pmm_service_list.json + local pod_service_name=monitoring-2-0-15594-monitoring-mongos-0 + local pmm_services_file=/tmp/tmp.Xji4JvHybv/pmm_service_list.json + echo 'Checking monitoring-2-0-15594-monitoring-mongos-0' Checking monitoring-2-0-15594-monitoring-mongos-0 ++ jq -r '.mongodb[] | select(.service_name=="monitoring-2-0-15594-monitoring-mongos-0") | .cluster' /tmp/tmp.Xji4JvHybv/pmm_service_list.json + pmm_service_cluster=custom-cluster-name + [[ custom-cluster-name != custom-cluster-name ]] + check_custom_cluster_name monitoring-2-0-15594-monitoring-rs0-0 /tmp/tmp.Xji4JvHybv/pmm_service_list.json + local pod_service_name=monitoring-2-0-15594-monitoring-rs0-0 + local pmm_services_file=/tmp/tmp.Xji4JvHybv/pmm_service_list.json + echo 'Checking monitoring-2-0-15594-monitoring-rs0-0' Checking monitoring-2-0-15594-monitoring-rs0-0 ++ jq -r '.mongodb[] | select(.service_name=="monitoring-2-0-15594-monitoring-rs0-0") | .cluster' /tmp/tmp.Xji4JvHybv/pmm_service_list.json + pmm_service_cluster=custom-cluster-name + [[ custom-cluster-name != custom-cluster-name ]] + check_custom_cluster_name monitoring-2-0-15594-monitoring-cfg-0 /tmp/tmp.Xji4JvHybv/pmm_service_list.json + local pod_service_name=monitoring-2-0-15594-monitoring-cfg-0 + local pmm_services_file=/tmp/tmp.Xji4JvHybv/pmm_service_list.json + echo 'Checking monitoring-2-0-15594-monitoring-cfg-0' Checking monitoring-2-0-15594-monitoring-cfg-0 ++ jq -r '.mongodb[] | select(.service_name=="monitoring-2-0-15594-monitoring-cfg-0") | .cluster' /tmp/tmp.Xji4JvHybv/pmm_service_list.json + pmm_service_cluster=custom-cluster-name + [[ custom-cluster-name != custom-cluster-name ]] + [[ -n '' ]] ++ kubectl_bin logs monitoring-rs0-0 pmm-client ++ grep -c 'cannot auto discover databases and collections' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1JuoZK8QFr +++ mktemp ++ local LAST_ERR=/tmp/tmp.dMs8TyTIJz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl logs 
monitoring-rs0-0 pmm-client ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.1JuoZK8QFr ++ cat /tmp/tmp.dMs8TyTIJz ++ rm /tmp/tmp.1JuoZK8QFr /tmp/tmp.dMs8TyTIJz ++ return 0 + [[ 0 != 0 ]] + helm uninstall monitoring release "monitoring" uninstalled + destroy monitoring-2-0-15594 + local namespace=monitoring-2-0-15594 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.2x7BxCWHrc +++ mktemp ++ local LAST_ERR=/tmp/tmp.31duXnd7tU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2x7BxCWHrc ++ cat /tmp/tmp.31duXnd7tU No resources found in monitoring-2-0-15594 namespace. ++ rm /tmp/tmp.2x7BxCWHrc /tmp/tmp.31duXnd7tU ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.i8BG2xQegD ++ mktemp + local LAST_ERR=/tmp/tmp.KVfxAllaQn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.i8BG2xQegD customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.KVfxAllaQn + rm /tmp/tmp.i8BG2xQegD /tmp/tmp.KVfxAllaQn + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local 
LAST_OUT=/tmp/tmp.COwzJYeDaj ++ mktemp + local LAST_ERR=/tmp/tmp.RbESUrBMee + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.COwzJYeDaj + cat /tmp/tmp.RbESUrBMee + rm /tmp/tmp.COwzJYeDaj /tmp/tmp.RbESUrBMee + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.0TgljHJe4H ++ mktemp + local LAST_ERR=/tmp/tmp.lJqfGLxpfU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0TgljHJe4H + cat /tmp/tmp.lJqfGLxpfU + rm /tmp/tmp.0TgljHJe4H /tmp/tmp.lJqfGLxpfU + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.emQbeetiXe ++ mktemp + local LAST_ERR=/tmp/tmp.ew63wFL1YK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.emQbeetiXe + cat /tmp/tmp.ew63wFL1YK + rm /tmp/tmp.emQbeetiXe /tmp/tmp.ew63wFL1YK + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.OpJ9EN5weO ++ mktemp + local LAST_ERR=/tmp/tmp.L8kRlu94kh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2276/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OpJ9EN5weO clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.L8kRlu94kh + rm /tmp/tmp.OpJ9EN5weO /tmp/tmp.L8kRlu94kh + return 0 + destroy_cert_manager + kubectl_bin delete -f 
https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.N0FDUgQEzk
++ mktemp
+ local LAST_ERR=/tmp/tmp.qVJkoUOvMo
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.N0FDUgQEzk
namespace "cert-manager" deleted
customresourcedefinition.apiextensions.k8s.io "challenges.acme.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "orders.acme.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "certificaterequests.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "certificates.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "clusterissuers.cert-manager.io" deleted
customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted
serviceaccount "cert-manager-cainjector" deleted from cert-manager namespace
serviceaccount "cert-manager" deleted from cert-manager namespace
serviceaccount "cert-manager-webhook" deleted from cert-manager namespace
clusterrole.rbac.authorization.k8s.io "cert-manager-cainjector" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-cluster-view" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted
clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted
role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace
role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace
role.rbac.authorization.k8s.io "cert-manager-tokenrequest" deleted from cert-manager namespace
role.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" deleted from cert-manager namespace
rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace
rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace
rolebinding.rbac.authorization.k8s.io "cert-manager-tokenrequest" deleted from cert-manager namespace
rolebinding.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" deleted from cert-manager namespace
deployment.apps "cert-manager-cainjector" deleted from cert-manager namespace
deployment.apps "cert-manager" deleted from cert-manager namespace
deployment.apps "cert-manager-webhook" deleted from cert-manager namespace
mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted
validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted
+ cat /tmp/tmp.qVJkoUOvMo
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found
+ sleep 0
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.N0FDUgQEzk
namespace "cert-manager" deleted
+ cat /tmp/tmp.qVJkoUOvMo
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ sleep 4
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.N0FDUgQEzk
+ cat /tmp/tmp.qVJkoUOvMo
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ sleep 8
+ cat /tmp/tmp.N0FDUgQEzk
+ cat /tmp/tmp.qVJkoUOvMo
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
+ '[' -n '' ']'
+ '[' -n psmdb-operator ']'
+ kubectl_bin delete --grace-period=0 --force=true namespace monitoring-2-0-15594
+ rm -rf /tmp/tmp.Xji4JvHybv
+ kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator
++ mktemp
++ mktemp
+ desc 'test passed'
+ set +o xtrace
+ local LAST_OUT=/tmp/tmp.0b4pewvZG3
-----------------------------------------------------------------------------------
test passed
-----------------------------------------------------------------------------------
+ local LAST_OUT=/tmp/tmp.XqJ0e1n8kr
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.sicW6M4Abc
+ local exit_status=0
+ local timeout=4
+ local LAST_ERR=/tmp/tmp.fiAHHHpa4s
++ seq 0 2
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace monitoring-2-0-15594
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.0b4pewvZG3
namespace "psmdb-operator" force deleted
+ cat /tmp/tmp.sicW6M4Abc
Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
+ rm /tmp/tmp.0b4pewvZG3 /tmp/tmp.sicW6M4Abc
+ return 0
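For reference, the kubectl_bin wrapper that produces the mktemp/seq/cat pattern seen throughout this trace behaves roughly as sketched below. This is a reconstruction from the xtrace output alone; the real helper is defined in the test harness and may differ in details such as argument handling and the exact retry condition:

    # sketch of the retry wrapper implied by the trace (not the harness source)
    kubectl_bin() {
        local LAST_OUT LAST_ERR
        LAST_OUT=$(mktemp)    # captures stdout, e.g. /tmp/tmp.XXXXXXXXXX
        LAST_ERR=$(mktemp)    # captures stderr
        local exit_status=0
        local timeout=4
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 ]; then
                cat "$LAST_OUT"
                cat "$LAST_ERR"
                sleep $((timeout * i))    # observed back-off: 0s, 4s, 8s
            else
                break
            fi
        done
        # after success or exhausted retries: replay output, clean up, propagate status
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }

The interleaved xtrace lines for the two namespace deletions above suggest the two kubectl_bin calls run concurrently; each keeps its own LAST_OUT/LAST_ERR pair, which is why two mktemp pairs and duplicate exit_status/timeout locals appear intermixed before the final break.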