Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/logs/monitoring-2-0.log Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 + create_infra monitoring-2-0-28042 + local ns=monitoring-2-0-28042 + [[ 1 == 1 ]] + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.oitSK3rnlY ++ mktemp + local LAST_ERR=/tmp/tmp.xyEmal6b2R + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oitSK3rnlY customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.xyEmal6b2R + rm /tmp/tmp.oitSK3rnlY /tmp/tmp.xyEmal6b2R + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.OJPGphwi1y ++ mktemp + local LAST_ERR=/tmp/tmp.zdiLAhlnfn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OJPGphwi1y + cat /tmp/tmp.zdiLAhlnfn + rm /tmp/tmp.OJPGphwi1y /tmp/tmp.zdiLAhlnfn + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't 
have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.SZKYvLda6T ++ mktemp + local LAST_ERR=/tmp/tmp.k1N3igfvj7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SZKYvLda6T + cat /tmp/tmp.k1N3igfvj7 + rm /tmp/tmp.SZKYvLda6T /tmp/tmp.k1N3igfvj7 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.aLTJZKCijP ++ mktemp + local LAST_ERR=/tmp/tmp.GWcHV59IJF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aLTJZKCijP + cat /tmp/tmp.GWcHV59IJF + rm /tmp/tmp.aLTJZKCijP /tmp/tmp.GWcHV59IJF + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.PFgoyrvHoC ++ mktemp + local LAST_ERR=/tmp/tmp.KNI9dIBaqu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PFgoyrvHoC clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.KNI9dIBaqu + rm /tmp/tmp.PFgoyrvHoC /tmp/tmp.KNI9dIBaqu + return 0 + check_crd_for_deletion PR-2266-f9fc55604 + local git_tag=PR-2266-f9fc55604 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2266-f9fc55604/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g' ++ /usr/sbin/sed s/---//g + for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g') ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.e5MLZZvND6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.lROMHMQm3z ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.e5MLZZvND6 ++ cat /tmp/tmp.lROMHMQm3z Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get 
crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.e5MLZZvND6 ++ cat /tmp/tmp.lROMHMQm3z Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.e5MLZZvND6 ++ cat /tmp/tmp.lROMHMQm3z Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.e5MLZZvND6 ++ cat /tmp/tmp.lROMHMQm3z Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.e5MLZZvND6 /tmp/tmp.lROMHMQm3z ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + awk '{print$1}' + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.AoB3KG3aQl + local LAST_OUT=/tmp/tmp.3ybXCOy8gk ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.YfKu5bs93E + local exit_status=0 + local timeout=4 ++ seq 0 2 + local 
LAST_ERR=/tmp/tmp.kBymqXxk7I + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get ns + for i in $(seq 0 2) + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3ybXCOy8gk + cat /tmp/tmp.YfKu5bs93E + rm /tmp/tmp.3ybXCOy8gk /tmp/tmp.YfKu5bs93E + return 0 namespace "cert-manager" deleted namespace "monitoring-2-0-19526" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AoB3KG3aQl namespace "psmdb-operator" deleted + cat /tmp/tmp.kBymqXxk7I + rm /tmp/tmp.AoB3KG3aQl /tmp/tmp.kBymqXxk7I + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.BBav6z4K7x ++ mktemp + local LAST_ERR=/tmp/tmp.r2iQ80j0vV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BBav6z4K7x + cat /tmp/tmp.r2iQ80j0vV + rm /tmp/tmp.BBav6z4K7x /tmp/tmp.r2iQ80j0vV + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.LKg6r9yyH1 ++ mktemp + local LAST_ERR=/tmp/tmp.jJjCwGjAos + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LKg6r9yyH1 namespace/psmdb-operator created + cat /tmp/tmp.jJjCwGjAos + rm /tmp/tmp.LKg6r9yyH1 /tmp/tmp.jJjCwGjAos + return 0 + set_kube_ctx psmdb-operator + local namespace=psmdb-operator ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.x7RSi9LMuA +++ mktemp ++ local LAST_ERR=/tmp/tmp.BYJ5VyniEO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.x7RSi9LMuA ++ cat /tmp/tmp.BYJ5VyniEO ++ rm /tmp/tmp.x7RSi9LMuA /tmp/tmp.BYJ5VyniEO ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2266-f9fc55604-9-cluster15 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.ByJaEgcDh9 ++ mktemp + local LAST_ERR=/tmp/tmp.oO2yHyLWYf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2266-f9fc55604-9-cluster15 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ByJaEgcDh9 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2266-f9fc55604-9-cluster15" modified. 
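# -----------------------------------------------------------------------------------
# Editor's note: every kubectl invocation in this trace goes through the test suite's
# kubectl_bin wrapper (mktemp for LAST_OUT/LAST_ERR, a three-attempt retry loop, then
# cat/rm/return), which is why the same boilerplate repeats around each command.
# Below is a minimal sketch of that pattern reconstructed from the trace alone: the
# function name, variable names and the 0s/4s/8s back-off match the log, but the exact
# implementation in e2e-tests/functions is assumed, not quoted.
#
#   kubectl_bin() {
#       local LAST_OUT LAST_ERR exit_status=0 timeout=4
#       LAST_OUT=$(mktemp); LAST_ERR=$(mktemp)
#       for i in $(seq 0 2); do                      # up to three attempts
#           set +e
#           kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
#           exit_status=$?
#           set -e
#           [ "$exit_status" -eq 0 ] && break        # success: stop retrying
#           sleep $((i * timeout))                   # 0s, 4s, 8s between attempts
#       done
#       cat "$LAST_OUT"; cat "$LAST_ERR" >&2         # surface captured stdout/stderr
#       rm "$LAST_OUT" "$LAST_ERR"
#       return "$exit_status"
#   }
# -----------------------------------------------------------------------------------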
+ cat /tmp/tmp.oO2yHyLWYf + rm /tmp/tmp.ByJaEgcDh9 /tmp/tmp.oO2yHyLWYf + return 0 + deploy_operator + desc 'start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2266-f9fc55604' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2266-f9fc55604 ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.NutTKnibWp ++ mktemp + local LAST_ERR=/tmp/tmp.YaLyQi1sHD + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NutTKnibWp customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.YaLyQi1sHD + rm /tmp/tmp.NutTKnibWp /tmp/tmp.YaLyQi1sHD + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.PmblkG5rPX ++ mktemp + local LAST_ERR=/tmp/tmp.f3x7JoHGq2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PmblkG5rPX clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.f3x7JoHGq2 + rm /tmp/tmp.PmblkG5rPX /tmp/tmp.f3x7JoHGq2 + return 0 + yq eval ' (.spec.template.spec.containers[].image = "docker.io/perconalab/percona-server-mongodb-operator:PR-2266-f9fc55604") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/cw-operator.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Ic2LjFg6xR ++ mktemp + local LAST_ERR=/tmp/tmp.HDm90v0aC0 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ic2LjFg6xR deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.HDm90v0aC0 + rm /tmp/tmp.Ic2LjFg6xR /tmp/tmp.HDm90v0aC0 + return 0 + sleep 20 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.X8f2n176hT +++ mktemp ++ local LAST_ERR=/tmp/tmp.8cRiTTPs0s ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.X8f2n176hT ++ cat /tmp/tmp.8cRiTTPs0s ++ rm /tmp/tmp.X8f2n176hT /tmp/tmp.8cRiTTPs0s ++ return 0 + wait_operator_pod percona-server-mongodb-operator-5554c4df68-zbmhg + local pod=percona-server-mongodb-operator-5554c4df68-zbmhg + set +o xtrace waiting for pod/percona-server-mongodb-operator-5554c4df68-zbmhg to be ready.OK + echo 'Print operator info from log' Print operator info from log + grep 'Manager starting up' ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.FZLBHa5LPV +++ mktemp ++ local LAST_ERR=/tmp/tmp.OVQROhHxF2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FZLBHa5LPV ++ cat /tmp/tmp.OVQROhHxF2 ++ rm /tmp/tmp.FZLBHa5LPV /tmp/tmp.OVQROhHxF2 ++ return 0 + kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-5554c4df68-zbmhg ++ mktemp + local LAST_OUT=/tmp/tmp.RFmwVNu3vq ++ mktemp + local LAST_ERR=/tmp/tmp.liBt2JwORa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl logs -n psmdb-operator percona-server-mongodb-operator-5554c4df68-zbmhg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RFmwVNu3vq + cat /tmp/tmp.liBt2JwORa + rm /tmp/tmp.RFmwVNu3vq /tmp/tmp.liBt2JwORa + return 0 2026-03-10T07:28:39.490Z INFO setup Manager starting up {"gitCommit": "f9fc5560468cd2df79998ec06ba830d1104a31a3", "gitBranch": "PR-2266-f9fc55604", "buildTime": "", "goVersion": "go1.25.8", "os": "linux", "arch": "amd64"} + create_namespace monitoring-2-0-28042 + local namespace=monitoring-2-0-28042 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep 
chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get clusterrolebinding ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + desc 'cleaned up old namespaces monitoring-2-0-28042' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces monitoring-2-0-28042 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace monitoring-2-0-28042 --ignore-not-found ++ mktemp + xargs kubectl delete ns ++ mktemp + awk '{print$1}' + grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' + local LAST_OUT=/tmp/tmp.HSJRBKLPTx ++ mktemp + local LAST_OUT=/tmp/tmp.m7Lmk1GYPn ++ mktemp + local LAST_ERR=/tmp/tmp.RkSEfyd0S1 + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.ae4Mb3Prqm + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete namespace monitoring-2-0-28042 --ignore-not-found + for i in $(seq 0 2) + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.m7Lmk1GYPn + cat /tmp/tmp.ae4Mb3Prqm + rm /tmp/tmp.m7Lmk1GYPn /tmp/tmp.ae4Mb3Prqm + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.HSJRBKLPTx + cat /tmp/tmp.RkSEfyd0S1 + rm /tmp/tmp.HSJRBKLPTx /tmp/tmp.RkSEfyd0S1 + return 0 + kubectl_bin wait --for=delete namespace monitoring-2-0-28042 ++ mktemp + local LAST_OUT=/tmp/tmp.Wkw5fnqEqG ++ mktemp + local LAST_ERR=/tmp/tmp.oynv7L9zIw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete namespace monitoring-2-0-28042 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Wkw5fnqEqG + cat /tmp/tmp.oynv7L9zIw + rm /tmp/tmp.Wkw5fnqEqG /tmp/tmp.oynv7L9zIw + return 0 + desc 'create namespace monitoring-2-0-28042' + set +o xtrace ----------------------------------------------------------------------------------- create namespace monitoring-2-0-28042 ----------------------------------------------------------------------------------- + kubectl_bin create namespace monitoring-2-0-28042 ++ mktemp + local 
LAST_OUT=/tmp/tmp.kpIRL5JLlA ++ mktemp + local LAST_ERR=/tmp/tmp.iYZwlewrGa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace monitoring-2-0-28042 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kpIRL5JLlA namespace/monitoring-2-0-28042 created + cat /tmp/tmp.iYZwlewrGa + rm /tmp/tmp.kpIRL5JLlA /tmp/tmp.iYZwlewrGa + return 0 + set_kube_ctx monitoring-2-0-28042 + local namespace=monitoring-2-0-28042 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.DEOocnKQKs +++ mktemp ++ local LAST_ERR=/tmp/tmp.pbQ7p0sy9B ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.DEOocnKQKs ++ cat /tmp/tmp.pbQ7p0sy9B ++ rm /tmp/tmp.DEOocnKQKs /tmp/tmp.pbQ7p0sy9B ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2266-f9fc55604-9-cluster15 --namespace=monitoring-2-0-28042 ++ mktemp + local LAST_OUT=/tmp/tmp.lNnJ7KftLt ++ mktemp + local LAST_ERR=/tmp/tmp.FHUAPXOo5w + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2266-f9fc55604-9-cluster15 --namespace=monitoring-2-0-28042 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lNnJ7KftLt Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2266-f9fc55604-9-cluster15" modified. + cat /tmp/tmp.FHUAPXOo5w + rm /tmp/tmp.lNnJ7KftLt /tmp/tmp.FHUAPXOo5w + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.7NF7jzaW1T ++ mktemp + local LAST_ERR=/tmp/tmp.qq88znl1fu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7NF7jzaW1T namespace/cert-manager created + cat /tmp/tmp.qq88znl1fu + rm /tmp/tmp.7NF7jzaW1T /tmp/tmp.qq88znl1fu + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.lpXMlWyzKP ++ mktemp + local LAST_ERR=/tmp/tmp.MUgP27nIaA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lpXMlWyzKP namespace/cert-manager labeled + cat /tmp/tmp.MUgP27nIaA + rm /tmp/tmp.lpXMlWyzKP /tmp/tmp.MUgP27nIaA + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.S8mKqhqwpf ++ mktemp + local LAST_ERR=/tmp/tmp.eoAnC02z4X + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.S8mKqhqwpf namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged 
customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-tokenrequest created role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager-tokenrequest created rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager-cainjector created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured 
validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.eoAnC02z4X Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. + rm /tmp/tmp.S8mKqhqwpf /tmp/tmp.eoAnC02z4X + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.3vqfGhhr12 ++ mktemp + local LAST_ERR=/tmp/tmp.MBfsDlzpSC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3vqfGhhr12 pod/cert-manager-559d798845-7ggrq condition met pod/cert-manager-cainjector-64958d9c7c-gxnx8 condition met pod/cert-manager-webhook-7fb6f99b56-rnckx condition met + cat /tmp/tmp.MBfsDlzpSC + rm /tmp/tmp.3vqfGhhr12 /tmp/tmp.MBfsDlzpSC + return 0 + sleep 120 + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + deploy_pmm_server + helm uninstall monitoring Error: uninstall: Release not loaded: monitoring: release: not found + : + helm repo remove stable "stable" has been removed from your repositories + helm repo add stable https://charts.helm.sh/stable "stable" has been added to your repositories + [[ -n '' ]] + retry 10 60 helm install monitoring --set imageTag=dev-latest --set imageRepo=docker.io/perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz + local max=10 + local delay=60 + shift 2 + local n=1 + helm install monitoring --set imageTag=dev-latest --set imageRepo=docker.io/perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz NAME: monitoring LAST DEPLOYED: Tue Mar 10 07:31:44 2026 NAMESPACE: monitoring-2-0-28042 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster: endpoint: https://monitoring-service.monitoring-2-0-28042.svc.cluster.local:443 login: admin password: admin + sleep 40 + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.THyNPBDuRU ++ mktemp + local LAST_ERR=/tmp/tmp.ALZX4py3DF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.THyNPBDuRU + cat /tmp/tmp.ALZX4py3DF + rm /tmp/tmp.THyNPBDuRU /tmp/tmp.ALZX4py3DF + return 0 + cluster=monitoring + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/conf/secrets.yml ++ 
mktemp + local LAST_OUT=/tmp/tmp.WJQvqGlPc0 ++ mktemp + local LAST_ERR=/tmp/tmp.966e3Eu2Al + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WJQvqGlPc0 secret/some-users created secret/some-users unchanged + cat /tmp/tmp.966e3Eu2Al + rm /tmp/tmp.WJQvqGlPc0 /tmp/tmp.966e3Eu2Al + return 0 + yq '.spec.template.spec.volumes[0].secret.secretName="monitoring-ssl"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/conf/client_with_tls.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.EXjDQPfnUh ++ mktemp + local LAST_ERR=/tmp/tmp.T3JMSounhn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EXjDQPfnUh deployment.apps/psmdb-client created + cat /tmp/tmp.T3JMSounhn + rm /tmp/tmp.EXjDQPfnUh /tmp/tmp.T3JMSounhn + return 0 + sleep 90 + desc 'create first PSMDB cluster monitoring' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster monitoring ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml ++ mktemp + yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' + yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2266-f9fc55604"' + /usr/sbin/sed -e s/NAME_SPACE/monitoring-2-0-28042/g + yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' + local LAST_OUT=/tmp/tmp.wZ8A6f80gh + yq eval '.spec.upgradeOptions.apply="Never"' ++ mktemp + local LAST_ERR=/tmp/tmp.AYFo1RaM2A + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wZ8A6f80gh perconaservermongodb.psmdb.percona.com/monitoring created + cat /tmp/tmp.AYFo1RaM2A + rm /tmp/tmp.wZ8A6f80gh /tmp/tmp.AYFo1RaM2A + return 0 + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready.............OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready...........OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 
'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.m1CvnozfDS +++ mktemp ++ local LAST_ERR=/tmp/tmp.hA2r1NmLx7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.m1CvnozfDS ++ cat /tmp/tmp.hA2r1NmLx7 ++ rm /tmp/tmp.m1CvnozfDS /tmp/tmp.hA2r1NmLx7 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready.............OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lOjmcyutk1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.qFKzGPCN1S ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lOjmcyutk1 ++ cat /tmp/tmp.qFKzGPCN1S ++ rm /tmp/tmp.lOjmcyutk1 /tmp/tmp.qFKzGPCN1S ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nhVEBnmhlS +++ mktemp ++ local LAST_ERR=/tmp/tmp.e9zECZzb5t ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.nhVEBnmhlS ++ cat /tmp/tmp.e9zECZzb5t ++ rm /tmp/tmp.nhVEBnmhlS /tmp/tmp.e9zECZzb5t ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness........................ + desc 'check if pmm-client container is not enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container is not enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-rs0 -no-pmm + local resource=statefulset/monitoring-rs0 + local postfix=-no-pmm + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml + local new_result=/tmp/tmp.rwbmdV3g89/statefulset_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. 
| select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-28042", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.aLO9zgYpMS ++ mktemp + local LAST_ERR=/tmp/tmp.okqNvpWlBV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aLO9zgYpMS + cat /tmp/tmp.okqNvpWlBV + rm /tmp/tmp.aLO9zgYpMS /tmp/tmp.okqNvpWlBV + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.rwbmdV3g89/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.rwbmdV3g89/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.rwbmdV3g89/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml /tmp/tmp.rwbmdV3g89/statefulset_monitoring-rs0.yml + log 'compare_kubectl: statefulset/monitoring-rs0 OK' + set +o xtrace [2026-03-10T07:36:28+0000] compare_kubectl: statefulset/monitoring-rs0 OK + sleep 10 + custom_port=27019 + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-28042 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-28042 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin 
get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.fUiVlt1Rch +++ mktemp ++ local LAST_ERR=/tmp/tmp.pS4s7t2PQi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.fUiVlt1Rch ++ cat /tmp/tmp.pS4s7t2PQi ++ rm /tmp/tmp.fUiVlt1Rch /tmp/tmp.pS4s7t2PQi ++ return 0 + local client_container=psmdb-client-699f458f75-9jk66 + kubectl_bin exec psmdb-client-699f458f75-9jk66 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-28042.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.giCNpOoa7Q ++ mktemp + local LAST_ERR=/tmp/tmp.yN5JTc6BkP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-699f458f75-9jk66 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-28042.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.giCNpOoa7Q Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-28042.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2026-03-10T07:36:41.265Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("c3903398-7925-4992-9cc3-b11516c0749f") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.yN5JTc6BkP + rm /tmp/tmp.giCNpOoa7Q /tmp/tmp.yN5JTc6BkP + return 0 + run_mongos 'sh.enableSharding("myApp")' clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-28042 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=sh.enableSharding("myApp")' + local uri=clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-28042 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GJKB3TeTBL +++ mktemp ++ local LAST_ERR=/tmp/tmp.if1CkIN6ei ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.GJKB3TeTBL ++ cat /tmp/tmp.if1CkIN6ei ++ rm /tmp/tmp.GJKB3TeTBL /tmp/tmp.if1CkIN6ei ++ return 0 + local client_container=psmdb-client-699f458f75-9jk66 + kubectl_bin exec psmdb-client-699f458f75-9jk66 -- 
bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-28042.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.tRxSm80eXE ++ mktemp + local LAST_ERR=/tmp/tmp.fTihX1hAtg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-699f458f75-9jk66 -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-28042.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tRxSm80eXE Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-28042.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2026-03-10T07:36:43.416Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("40174293-50d8-4d8f-9eee-1e39c961437f") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match { "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1773128203, 8), "signature" : { "hash" : BinData(0,"IWHGGGkEktKbrtecaEb9YDJpuAU="), "keyId" : NumberLong("7615527252658225176") } }, "operationTime" : Timestamp(1773128203, 5) } bye + cat /tmp/tmp.fTihX1hAtg + rm /tmp/tmp.tRxSm80eXE /tmp/tmp.fTihX1hAtg + return 0 + insert_data_mongos 100500 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100500 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@monitoring-mongos.monitoring-2-0-28042 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-28042 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RBUtzI0Yy8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.3J60jhr4ZZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RBUtzI0Yy8 ++ cat /tmp/tmp.3J60jhr4ZZ ++ rm /tmp/tmp.RBUtzI0Yy8 /tmp/tmp.3J60jhr4ZZ ++ return 0 + local client_container=psmdb-client-699f458f75-9jk66 + kubectl_bin exec psmdb-client-699f458f75-9jk66 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-28042.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.WByQ3OsPtk ++ mktemp + local 
LAST_ERR=/tmp/tmp.B8Awg0QL2i + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-699f458f75-9jk66 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-28042.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WByQ3OsPtk Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-28042.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2026-03-10T07:36:46.376Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("b23079c0-9c2e-4aa1-9510-fadcbb6305d4") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.B8Awg0QL2i + rm /tmp/tmp.WByQ3OsPtk /tmp/tmp.B8Awg0QL2i + return 0 + insert_data_mongos 100600 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100600 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + run_mongos 'use myApp\n db.test.insert({ x: 100600 })' myApp:myPass@monitoring-mongos.monitoring-2-0-28042 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100600 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-28042 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tYIMRQsTJA +++ mktemp ++ local LAST_ERR=/tmp/tmp.Omh20bY3rM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tYIMRQsTJA ++ cat /tmp/tmp.Omh20bY3rM ++ rm /tmp/tmp.tYIMRQsTJA /tmp/tmp.Omh20bY3rM ++ return 0 + local client_container=psmdb-client-699f458f75-9jk66 + kubectl_bin exec psmdb-client-699f458f75-9jk66 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-28042.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.Tg3q1iYqC4 ++ mktemp + local LAST_ERR=/tmp/tmp.rWxKqG44FN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-699f458f75-9jk66 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-28042.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Tg3q1iYqC4 Percona Server for MongoDB 
shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-28042.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2026-03-10T07:36:49.038Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("b6150254-57a3-40f3-8e2b-40a2b85622de") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.rWxKqG44FN + rm /tmp/tmp.Tg3q1iYqC4 /tmp/tmp.rWxKqG44FN + return 0 + insert_data_mongos 100700 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100700 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + run_mongos 'use myApp\n db.test.insert({ x: 100700 })' myApp:myPass@monitoring-mongos.monitoring-2-0-28042 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100700 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-28042 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jAjbY1618l +++ mktemp ++ local LAST_ERR=/tmp/tmp.vyaAwddx35 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jAjbY1618l ++ cat /tmp/tmp.vyaAwddx35 ++ rm /tmp/tmp.jAjbY1618l /tmp/tmp.vyaAwddx35 ++ return 0 + local client_container=psmdb-client-699f458f75-9jk66 + kubectl_bin exec psmdb-client-699f458f75-9jk66 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-28042.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.6uk5aNdcui ++ mktemp + local LAST_ERR=/tmp/tmp.e5DjFhKWMc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-699f458f75-9jk66 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-28042.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6uk5aNdcui Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-28042.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2026-03-10T07:36:51.179Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("be47c3bc-03e0-44fc-9937-311256890237") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp 
WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.e5DjFhKWMc + rm /tmp/tmp.6uk5aNdcui /tmp/tmp.e5DjFhKWMc + return 0 + desc 'add PMM_SERVER_API_KEY for secret some-users' + set +o xtrace ----------------------------------------------------------------------------------- add PMM_SERVER_API_KEY for secret some-users ----------------------------------------------------------------------------------- ++ jq .key +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.djmBgCf1Ol +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.NiEf9FjPel ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.djmBgCf1Ol ++++ cat /tmp/tmp.NiEf9FjPel ++++ rm /tmp/tmp.djmBgCf1Ol /tmp/tmp.NiEf9FjPel ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.0o1AzagZAd +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.q39eBsOnAY ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.0o1AzagZAd ++++ cat /tmp/tmp.q39eBsOnAY ++++ rm /tmp/tmp.0o1AzagZAd /tmp/tmp.q39eBsOnAY ++++ return 0 +++ local ip=136.111.183.188 +++ '[' -n 136.111.183.188 -a 136.111.183.188 '!=' null ']' +++ echo 136.111.183.188 +++ return ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@136.111.183.188/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 155 100 119 100 36 283 85 --:--:-- --:--:-- --:--:-- 369 + API_KEY='"eyJrIjoiU2F0MVBYY3lBTVNkSVdqeG5xRm16d2JLRjhiRnJ1QnkiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="' + kubectl_bin patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoiU2F0MVBYY3lBTVNkSVdqeG5xRm16d2JLRjhiRnJ1QnkiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' ++ mktemp + local LAST_OUT=/tmp/tmp.eq4PBn5WzP ++ mktemp + local LAST_ERR=/tmp/tmp.u6dO5rBXCc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoiU2F0MVBYY3lBTVNkSVdqeG5xRm16d2JLRjhiRnJ1QnkiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eq4PBn5WzP secret/some-users patched + cat /tmp/tmp.u6dO5rBXCc + rm /tmp/tmp.eq4PBn5WzP /tmp/tmp.u6dO5rBXCc + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in $(seq 0 $last_pod) + 
[[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.7S1qURvetJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.4vfAHKBlQz ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.7S1qURvetJ ++ cat /tmp/tmp.4vfAHKBlQz ++ rm /tmp/tmp.7S1qURvetJ /tmp/tmp.4vfAHKBlQz ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready.OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gSNPbBcGjX +++ mktemp ++ local LAST_ERR=/tmp/tmp.p1rLqBGJ7J ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gSNPbBcGjX ++ cat /tmp/tmp.p1rLqBGJ7J ++ rm /tmp/tmp.gSNPbBcGjX /tmp/tmp.p1rLqBGJ7J ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EZxXmmTRzL +++ mktemp ++ local LAST_ERR=/tmp/tmp.MsRVW7OM1C ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.EZxXmmTRzL ++ cat /tmp/tmp.MsRVW7OM1C ++ rm /tmp/tmp.EZxXmmTRzL /tmp/tmp.MsRVW7OM1C ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness..................................................................................................................................................... + sleep 90 + desc 'check if pmm-client container enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-rs0 + local resource=statefulset/monitoring-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml + local new_result=/tmp/tmp.rwbmdV3g89/statefulset_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-28042", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.Uxc0FV3NWT ++ mktemp + local LAST_ERR=/tmp/tmp.Wuzs6ob4bH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Uxc0FV3NWT + cat /tmp/tmp.Wuzs6ob4bH + rm /tmp/tmp.Uxc0FV3NWT /tmp/tmp.Wuzs6ob4bH + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.rwbmdV3g89/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.rwbmdV3g89/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.rwbmdV3g89/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml /tmp/tmp.rwbmdV3g89/statefulset_monitoring-rs0.yml + log 'compare_kubectl: statefulset/monitoring-rs0 OK' + set +o xtrace [2026-03-10T07:43:44+0000] compare_kubectl: statefulset/monitoring-rs0 OK + compare_kubectl service/monitoring-rs0 + local resource=service/monitoring-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml + local new_result=/tmp/tmp.rwbmdV3g89/service_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-28042", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.WMnD7jkvOH ++ mktemp + local LAST_ERR=/tmp/tmp.6AwypxXa8G + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WMnD7jkvOH + cat /tmp/tmp.6AwypxXa8G + rm /tmp/tmp.WMnD7jkvOH /tmp/tmp.6AwypxXa8G + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.rwbmdV3g89/service_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.rwbmdV3g89/service_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.rwbmdV3g89/service_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml /tmp/tmp.rwbmdV3g89/service_monitoring-rs0.yml + log 'compare_kubectl: service/monitoring-rs0 OK' + set +o xtrace [2026-03-10T07:43:45+0000] compare_kubectl: service/monitoring-rs0 OK + compare_kubectl service/monitoring-mongos + local resource=service/monitoring-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml + local new_result=/tmp/tmp.rwbmdV3g89/service_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | ++ mktemp del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. 
| select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-28042", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.EC8DNJAD3N ++ mktemp + local LAST_ERR=/tmp/tmp.MWG3LXsfSy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EC8DNJAD3N + cat /tmp/tmp.MWG3LXsfSy + rm /tmp/tmp.EC8DNJAD3N /tmp/tmp.MWG3LXsfSy + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.rwbmdV3g89/service_monitoring-mongos.yml + version_gt 1.22 ++ bc -l ++ echo '1.32 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.rwbmdV3g89/service_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.rwbmdV3g89/service_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml /tmp/tmp.rwbmdV3g89/service_monitoring-mongos.yml + log 'compare_kubectl: service/monitoring-mongos OK' + set +o xtrace [2026-03-10T07:43:45+0000] compare_kubectl: service/monitoring-mongos OK + compare_kubectl statefulset/monitoring-cfg + local resource=statefulset/monitoring-cfg + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml + local new_result=/tmp/tmp.rwbmdV3g89/statefulset_monitoring-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. 
| select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-28042", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.KpsS884Qyf ++ mktemp + local LAST_ERR=/tmp/tmp.jM4sfU9aJ6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KpsS884Qyf + cat /tmp/tmp.jM4sfU9aJ6 + rm /tmp/tmp.KpsS884Qyf /tmp/tmp.jM4sfU9aJ6 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.rwbmdV3g89/statefulset_monitoring-cfg.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.rwbmdV3g89/statefulset_monitoring-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.rwbmdV3g89/statefulset_monitoring-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml /tmp/tmp.rwbmdV3g89/statefulset_monitoring-cfg.yml + log 'compare_kubectl: statefulset/monitoring-cfg OK' + set +o xtrace [2026-03-10T07:43:46+0000] compare_kubectl: statefulset/monitoring-cfg OK + compare_kubectl statefulset/monitoring-mongos + local resource=statefulset/monitoring-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml + local new_result=/tmp/tmp.rwbmdV3g89/statefulset_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. 
| select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-28042", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.IqXGVSwlap ++ mktemp + local LAST_ERR=/tmp/tmp.yUuB7zGag4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IqXGVSwlap + cat /tmp/tmp.yUuB7zGag4 + rm /tmp/tmp.IqXGVSwlap /tmp/tmp.yUuB7zGag4 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.rwbmdV3g89/statefulset_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.rwbmdV3g89/statefulset_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.rwbmdV3g89/statefulset_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml /tmp/tmp.rwbmdV3g89/statefulset_monitoring-mongos.yml + log 'compare_kubectl: statefulset/monitoring-mongos OK' + set +o xtrace [2026-03-10T07:43:48+0000] compare_kubectl: statefulset/monitoring-mongos OK + desc 'check mongod metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongod metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-28042-monitoring-rs0-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-28042-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1773128568 ++ /usr/sbin/date -u +%s + local end=1773128628 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.e53oDP1z7z ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ag79hSinG9 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.e53oDP1z7z +++ cat /tmp/tmp.ag79hSinG9 +++ rm /tmp/tmp.e53oDP1z7z /tmp/tmp.ag79hSinG9 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.eynJAoRImQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.RUAXN1E5Ia +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.eynJAoRImQ +++ cat /tmp/tmp.RUAXN1E5Ia +++ rm /tmp/tmp.eynJAoRImQ /tmp/tmp.RUAXN1E5Ia +++ return 0 ++ local ip=136.111.183.188 ++ '[' -n 136.111.183.188 -a 136.111.183.188 '!=' null ']' ++ echo 136.111.183.188 ++ return + local endpoint=136.111.183.188 + curl -s -k 
'https://admin:admin@136.111.183.188/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-28042-monitoring-rs0-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-28042-monitoring-rs0-1%22%7D%29&start=1773128568&end=1773128628&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "1773122547" "1773122547" + get_metric_values mongodb_connections monitoring-2-0-28042-monitoring-rs0-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-28042-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1773128570 ++ /usr/sbin/date -u +%s + local end=1773128630 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.LhTiOuxEIK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QqCXLOoFJD +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.LhTiOuxEIK +++ cat /tmp/tmp.QqCXLOoFJD +++ rm /tmp/tmp.LhTiOuxEIK /tmp/tmp.QqCXLOoFJD +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.yvrjGgrRkp ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AXjHTH6yfZ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.yvrjGgrRkp +++ cat /tmp/tmp.AXjHTH6yfZ +++ rm /tmp/tmp.yvrjGgrRkp /tmp/tmp.AXjHTH6yfZ +++ return 0 ++ local ip=136.111.183.188 ++ '[' -n 136.111.183.188 -a 136.111.183.188 '!=' null ']' ++ echo 136.111.183.188 ++ return + local endpoint=136.111.183.188 + grep '^"[0-9]' + curl -s -k 'https://admin:admin@136.111.183.188/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-28042-monitoring-rs0-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-28042-monitoring-rs0-1%22%7D%29&start=1773128570&end=1773128630&step=60' + jq '.data.result[0].values[][1]' "0" "0" + desc 'check mongo config metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongo config metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-28042-monitoring-cfg-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-28042-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1773128572 ++ /usr/sbin/date -u +%s + local end=1773128632 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.cRgmsPCxzk ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gjmwPUuxTA +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o 
json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.cRgmsPCxzk +++ cat /tmp/tmp.gjmwPUuxTA +++ rm /tmp/tmp.cRgmsPCxzk /tmp/tmp.gjmwPUuxTA +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.L4bp19Bljo ++++ mktemp +++ local LAST_ERR=/tmp/tmp.hOij1ur5YL +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.L4bp19Bljo +++ cat /tmp/tmp.hOij1ur5YL +++ rm /tmp/tmp.L4bp19Bljo /tmp/tmp.hOij1ur5YL +++ return 0 ++ local ip=136.111.183.188 ++ '[' -n 136.111.183.188 -a 136.111.183.188 '!=' null ']' ++ echo 136.111.183.188 ++ return + local endpoint=136.111.183.188 + curl -s -k 'https://admin:admin@136.111.183.188/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-28042-monitoring-cfg-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-28042-monitoring-cfg-1%22%7D%29&start=1773128572&end=1773128632&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "1773122639" "1773122639" + get_metric_values mongodb_connections monitoring-2-0-28042-monitoring-cfg-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-28042-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1773128574 ++ /usr/sbin/date -u +%s + local end=1773128634 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.513gzVZg8V ++++ mktemp +++ local LAST_ERR=/tmp/tmp.8xXBTGE7OL +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.513gzVZg8V +++ cat /tmp/tmp.8xXBTGE7OL +++ rm /tmp/tmp.513gzVZg8V /tmp/tmp.8xXBTGE7OL +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9N8PQHFFFq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ma0podMB6B +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.9N8PQHFFFq +++ cat /tmp/tmp.ma0podMB6B +++ rm /tmp/tmp.9N8PQHFFFq /tmp/tmp.ma0podMB6B +++ return 0 ++ local ip=136.111.183.188 ++ '[' -n 136.111.183.188 -a 136.111.183.188 '!=' null ']' ++ echo 136.111.183.188 ++ return + local endpoint=136.111.183.188 + jq '.data.result[0].values[][1]' + curl -s -k 'https://admin:admin@136.111.183.188/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-28042-monitoring-cfg-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-28042-monitoring-cfg-1%22%7D%29&start=1773128574&end=1773128634&step=60' + grep '^"[0-9]' "0" "0" + desc 'check mongos metrics' + set +o xtrace 
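Each get_metric_values call above is one Prometheus range query sent through the PMM Grafana datasource proxy, filtered by node_name and then checked for numeric samples. A minimal sketch of the equivalent query (endpoint, credentials and instance name as in this run; the real helper builds a pre-encoded URL and ORs the same selector twice):

# sketch only: query the last minute of mongodb_connections for one instance from PMM
ENDPOINT=136.111.183.188
INSTANCE=monitoring-2-0-28042-monitoring-cfg-1
START=$(date -u +%s -d '-1 minute'); END=$(date -u +%s)
curl -sk -G "https://admin:admin@${ENDPOINT}/graph/api/datasources/proxy/1/api/v1/query_range" \
  --data-urlencode "query=min(mongodb_connections{node_name=~\"${INSTANCE}\"})" \
  --data-urlencode "start=${START}" --data-urlencode "end=${END}" --data-urlencode "step=60" \
  | jq '.data.result[0].values[][1]' | grep '^"[0-9]'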
----------------------------------------------------------------------------------- check mongos metrics ----------------------------------------------------------------------------------- ++ kubectl get pod -l app.kubernetes.io/component=mongos -o 'jsonpath={.items[0].metadata.name}' + MONGOS_POD_NAME=monitoring-mongos-0 + get_metric_values node_boot_time_seconds monitoring-2-0-28042-monitoring-mongos-0 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-28042-monitoring-mongos-0 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1773128578 ++ /usr/sbin/date -u +%s + local end=1773128638 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8lGHFuJA49 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vykkVz2Ada +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.8lGHFuJA49 +++ cat /tmp/tmp.vykkVz2Ada +++ rm /tmp/tmp.8lGHFuJA49 /tmp/tmp.vykkVz2Ada +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.T1JK5QQtyY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.H2nfYfiCIL +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.T1JK5QQtyY +++ cat /tmp/tmp.H2nfYfiCIL +++ rm /tmp/tmp.T1JK5QQtyY /tmp/tmp.H2nfYfiCIL +++ return 0 ++ local ip=136.111.183.188 ++ '[' -n 136.111.183.188 -a 136.111.183.188 '!=' null ']' ++ echo 136.111.183.188 ++ return + local endpoint=136.111.183.188 + jq '.data.result[0].values[][1]' + grep '^"[0-9]' + curl -s -k 'https://admin:admin@136.111.183.188/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-28042-monitoring-mongos-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-28042-monitoring-mongos-0%22%7D%29&start=1773128578&end=1773128638&step=60' "1773122547" "1773122547" + sleep 90 + desc 'check QAN data' + set +o xtrace ----------------------------------------------------------------------------------- check QAN data ----------------------------------------------------------------------------------- + get_qan_values mongodb dev-mongod admin:admin + local service_type=mongodb + local environment=dev-mongod + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/sbin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2026-03-09T19:45:30+00:00 ++ /usr/sbin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2026-03-10T07:45:30+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.VFv2isP0AO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.oPmzFxcZz7 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set 
-e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.VFv2isP0AO +++ cat /tmp/tmp.oPmzFxcZz7 +++ rm /tmp/tmp.VFv2isP0AO /tmp/tmp.oPmzFxcZz7 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.hIXz4q4hJS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.S7m8I3za3o +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.hIXz4q4hJS +++ cat /tmp/tmp.S7m8I3za3o +++ rm /tmp/tmp.hIXz4q4hJS /tmp/tmp.S7m8I3za3o +++ return 0 ++ local ip=136.111.183.188 ++ '[' -n 136.111.183.188 -a 136.111.183.188 '!=' null ']' ++ echo 136.111.183.188 ++ return + endpoint=136.111.183.188 + cat + local response + retry=0 ++ curl -s -k -XPOST -d @payload.json https://admin:admin@136.111.183.188/v0/qan/GetReport ++ jq '.rows[].sparkline' + [[ [ { "time_frame": 360, "timestamp": "2026-03-10T07:45:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-10T07:39:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-10T07:33:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-10T07:27:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-10T07:21:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-10T07:15:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-10T07:09:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2026-03-10T07:03:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2026-03-10T06:57:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-10T06:51:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-10T06:45:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2026-03-10T06:39:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2026-03-10T06:33:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2026-03-10T06:27:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2026-03-10T06:21:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2026-03-10T06:15:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-10T06:09:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-10T06:03:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-10T05:57:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-10T05:51:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-10T05:45:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-10T05:39:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-10T05:33:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-10T05:27:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-10T05:21:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-10T05:15:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-10T05:09:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-10T05:03:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-10T04:57:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-10T04:51:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-10T04:45:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-10T04:39:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-10T04:33:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-10T04:27:00Z" }, { "point": 34, 
"time_frame": 360, "timestamp": "2026-03-10T04:21:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-10T04:15:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-10T04:09:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2026-03-10T04:03:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-10T03:57:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-10T03:51:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2026-03-10T03:45:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-10T03:39:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-10T03:33:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-10T03:27:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-10T03:21:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-10T03:15:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-10T03:09:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-10T03:03:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-10T02:57:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-10T02:51:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-10T02:45:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-10T02:39:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-10T02:33:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-10T02:27:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-10T02:21:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-10T02:15:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-10T02:09:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-10T02:03:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-10T01:57:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2026-03-10T01:51:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-10T01:45:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2026-03-10T01:39:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-10T01:33:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2026-03-10T01:27:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-10T01:21:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-10T01:15:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-10T01:09:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-10T01:03:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-10T00:57:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-10T00:51:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-10T00:45:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-10T00:39:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-10T00:33:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-10T00:27:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-10T00:21:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-10T00:15:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-10T00:09:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-10T00:03:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-09T23:57:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-09T23:51:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-09T23:45:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-09T23:39:00Z" }, { "point": 82, "time_frame": 360, "timestamp": 
"2026-03-09T23:33:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-09T23:27:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-09T23:21:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-09T23:15:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-09T23:09:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-09T23:03:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-09T22:57:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-09T22:51:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-09T22:45:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-09T22:39:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-09T22:33:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-09T22:27:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-09T22:21:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-09T22:15:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-09T22:09:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-09T22:03:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-09T21:57:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-09T21:51:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-09T21:45:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-09T21:39:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-09T21:33:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-09T21:27:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-09T21:21:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-09T21:15:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-09T21:09:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-09T21:03:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-09T20:57:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-09T20:51:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-09T20:45:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-09T20:39:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-09T20:33:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-09T20:27:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-09T20:21:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-09T20:15:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-09T20:09:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-09T20:03:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-09T19:57:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-09T19:51:00Z" } ] [ { "time_frame": 360, "timestamp": "2026-03-10T07:45:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-10T07:39:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-10T07:33:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-10T07:27:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-10T07:21:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-10T07:15:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-10T07:09:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2026-03-10T07:03:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2026-03-10T06:57:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-10T06:51:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-10T06:45:00Z" }, { "point": 11, 
"time_frame": 360, "timestamp": "2026-03-10T06:39:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2026-03-10T06:33:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2026-03-10T06:27:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2026-03-10T06:21:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2026-03-10T06:15:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-10T06:09:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-10T06:03:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-10T05:57:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-10T05:51:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-10T05:45:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-10T05:39:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-10T05:33:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-10T05:27:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-10T05:21:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-10T05:15:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-10T05:09:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-10T05:03:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-10T04:57:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-10T04:51:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-10T04:45:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-10T04:39:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-10T04:33:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-10T04:27:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2026-03-10T04:21:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-10T04:15:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-10T04:09:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2026-03-10T04:03:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-10T03:57:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-10T03:51:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2026-03-10T03:45:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-10T03:39:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-10T03:33:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-10T03:27:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-10T03:21:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-10T03:15:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-10T03:09:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-10T03:03:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-10T02:57:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-10T02:51:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-10T02:45:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-10T02:39:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-10T02:33:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-10T02:27:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-10T02:21:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-10T02:15:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-10T02:09:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-10T02:03:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-10T01:57:00Z" }, { "point": 59, "time_frame": 360, "timestamp": 
"2026-03-10T01:51:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-10T01:45:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2026-03-10T01:39:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-10T01:33:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2026-03-10T01:27:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-10T01:21:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-10T01:15:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-10T01:09:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-10T01:03:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-10T00:57:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-10T00:51:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-10T00:45:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-10T00:39:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-10T00:33:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-10T00:27:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-10T00:21:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-10T00:15:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-10T00:09:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-10T00:03:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-09T23:57:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-09T23:51:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-09T23:45:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-09T23:39:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-09T23:33:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-09T23:27:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-09T23:21:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-09T23:15:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-09T23:09:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-09T23:03:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-09T22:57:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-09T22:51:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-09T22:45:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-09T22:39:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-09T22:33:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-09T22:27:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-09T22:21:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-09T22:15:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-09T22:09:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-09T22:03:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-09T21:57:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-09T21:51:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-09T21:45:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-09T21:39:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-09T21:33:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-09T21:27:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-09T21:21:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-09T21:15:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-09T21:09:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-09T21:03:00Z" }, { "point": 108, 
"time_frame": 360, "timestamp": "2026-03-09T20:57:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-09T20:51:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-09T20:45:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-09T20:39:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-09T20:33:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-09T20:27:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-09T20:21:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-09T20:15:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-09T20:09:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-09T20:03:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-09T19:57:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-09T19:51:00Z" } ] [ { "time_frame": 360, "timestamp": "2026-03-10T07:45:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-10T07:39:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-10T07:33:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-10T07:27:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-10T07:21:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-10T07:15:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-10T07:09:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2026-03-10T07:03:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2026-03-10T06:57:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-10T06:51:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-10T06:45:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2026-03-10T06:39:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2026-03-10T06:33:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2026-03-10T06:27:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2026-03-10T06:21:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2026-03-10T06:15:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-10T06:09:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-10T06:03:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-10T05:57:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-10T05:51:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-10T05:45:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-10T05:39:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-10T05:33:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-10T05:27:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-10T05:21:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-10T05:15:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-10T05:09:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-10T05:03:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-10T04:57:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-10T04:51:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-10T04:45:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-10T04:39:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-10T04:33:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-10T04:27:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2026-03-10T04:21:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-10T04:15:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-10T04:09:00Z" }, 
{ "point": 37, "time_frame": 360, "timestamp": "2026-03-10T04:03:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-10T03:57:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-10T03:51:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2026-03-10T03:45:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-10T03:39:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-10T03:33:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-10T03:27:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-10T03:21:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-10T03:15:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-10T03:09:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-10T03:03:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-10T02:57:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-10T02:51:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-10T02:45:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-10T02:39:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-10T02:33:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-10T02:27:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-10T02:21:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-10T02:15:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-10T02:09:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-10T02:03:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-10T01:57:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2026-03-10T01:51:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-10T01:45:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2026-03-10T01:39:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-10T01:33:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2026-03-10T01:27:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-10T01:21:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-10T01:15:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-10T01:09:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-10T01:03:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-10T00:57:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-10T00:51:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-10T00:45:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-10T00:39:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-10T00:33:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-10T00:27:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-10T00:21:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-10T00:15:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-10T00:09:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-10T00:03:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-09T23:57:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-09T23:51:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-09T23:45:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-09T23:39:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-09T23:33:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-09T23:27:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-09T23:21:00Z" }, { "point": 85, "time_frame": 360, "timestamp": 
"2026-03-09T23:15:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-09T23:09:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-09T23:03:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-09T22:57:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-09T22:51:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-09T22:45:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-09T22:39:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-09T22:33:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-09T22:27:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-09T22:21:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-09T22:15:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-09T22:09:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-09T22:03:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-09T21:57:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-09T21:51:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-09T21:45:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-09T21:39:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-09T21:33:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-09T21:27:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-09T21:21:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-09T21:15:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-09T21:09:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-09T21:03:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-09T20:57:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-09T20:51:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-09T20:45:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-09T20:39:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-09T20:33:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-09T20:27:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-09T20:21:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-09T20:15:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-09T20:09:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-09T20:03:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-09T19:57:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-09T19:51:00Z" } ] [ { "time_frame": 360, "timestamp": "2026-03-10T07:45:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-10T07:39:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-10T07:33:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-10T07:27:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-10T07:21:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-10T07:15:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-10T07:09:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2026-03-10T07:03:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2026-03-10T06:57:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-10T06:51:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-10T06:45:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2026-03-10T06:39:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2026-03-10T06:33:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2026-03-10T06:27:00Z" }, { "point": 14, 
"time_frame": 360, "timestamp": "2026-03-10T06:21:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2026-03-10T06:15:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-10T06:09:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-10T06:03:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-10T05:57:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-10T05:51:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-10T05:45:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-10T05:39:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-10T05:33:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-10T05:27:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-10T05:21:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-10T05:15:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-10T05:09:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-10T05:03:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-10T04:57:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-10T04:51:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-10T04:45:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-10T04:39:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-10T04:33:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-10T04:27:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2026-03-10T04:21:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-10T04:15:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-10T04:09:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2026-03-10T04:03:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-10T03:57:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-10T03:51:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2026-03-10T03:45:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-10T03:39:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-10T03:33:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-10T03:27:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-10T03:21:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-10T03:15:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-10T03:09:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-10T03:03:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-10T02:57:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-10T02:51:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-10T02:45:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-10T02:39:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-10T02:33:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-10T02:27:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-10T02:21:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-10T02:15:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-10T02:09:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-10T02:03:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-10T01:57:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2026-03-10T01:51:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-10T01:45:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2026-03-10T01:39:00Z" }, { "point": 62, "time_frame": 360, "timestamp": 
"2026-03-10T01:33:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2026-03-10T01:27:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-10T01:21:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-10T01:15:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-10T01:09:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-10T01:03:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-10T00:57:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-10T00:51:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-10T00:45:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-10T00:39:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-10T00:33:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-10T00:27:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-10T00:21:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-10T00:15:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-10T00:09:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-10T00:03:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-09T23:57:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-09T23:51:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-09T23:45:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-09T23:39:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-09T23:33:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-09T23:27:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-09T23:21:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-09T23:15:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-09T23:09:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-09T23:03:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-09T22:57:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-09T22:51:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-09T22:45:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-09T22:39:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-09T22:33:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-09T22:27:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-09T22:21:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-09T22:15:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-09T22:09:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-09T22:03:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-09T21:57:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-09T21:51:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-09T21:45:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-09T21:39:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-09T21:33:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-09T21:27:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-09T21:21:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-09T21:15:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-09T21:09:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-09T21:03:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-09T20:57:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-09T20:51:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-09T20:45:00Z" }, { "point": 
111, "time_frame": 360, "timestamp": "2026-03-09T20:39:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-09T20:33:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-09T20:27:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-09T20:21:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-09T20:15:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-09T20:09:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-09T20:03:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-09T19:57:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-09T19:51:00Z" } ] != \n\u\l\l ]] + rm -f payload.json + get_qan_values mongodb dev-mongos admin:admin + local service_type=mongodb + local environment=dev-mongos + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/sbin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2026-03-09T19:45:33+00:00 ++ /usr/sbin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2026-03-10T07:45:33+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.htI93rcoi5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FMuElBHYpg +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.htI93rcoi5 +++ cat /tmp/tmp.FMuElBHYpg +++ rm /tmp/tmp.htI93rcoi5 /tmp/tmp.FMuElBHYpg +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.LQ9CfaxCcL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.N8lDsGcCa7 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.LQ9CfaxCcL +++ cat /tmp/tmp.N8lDsGcCa7 +++ rm /tmp/tmp.LQ9CfaxCcL /tmp/tmp.N8lDsGcCa7 +++ return 0 ++ local ip=136.111.183.188 ++ '[' -n 136.111.183.188 -a 136.111.183.188 '!=' null ']' ++ echo 136.111.183.188 ++ return + endpoint=136.111.183.188 + cat + local response + retry=0 ++ curl -s -k -XPOST -d @payload.json https://admin:admin@136.111.183.188/v0/qan/GetReport ++ jq '.rows[].sparkline' + [[ [ { "time_frame": 360, "timestamp": "2026-03-10T07:45:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-10T07:39:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-10T07:33:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-10T07:27:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-10T07:21:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-10T07:15:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-10T07:09:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2026-03-10T07:03:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2026-03-10T06:57:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-10T06:51:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-10T06:45:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2026-03-10T06:39:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2026-03-10T06:33:00Z" }, { "point": 13, 
"time_frame": 360, "timestamp": "2026-03-10T06:27:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2026-03-10T06:21:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2026-03-10T06:15:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-10T06:09:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-10T06:03:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-10T05:57:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-10T05:51:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-10T05:45:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-10T05:39:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-10T05:33:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-10T05:27:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-10T05:21:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-10T05:15:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-10T05:09:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-10T05:03:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-10T04:57:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-10T04:51:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-10T04:45:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-10T04:39:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-10T04:33:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-10T04:27:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2026-03-10T04:21:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-10T04:15:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-10T04:09:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2026-03-10T04:03:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-10T03:57:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-10T03:51:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2026-03-10T03:45:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-10T03:39:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-10T03:33:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-10T03:27:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-10T03:21:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-10T03:15:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-10T03:09:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-10T03:03:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-10T02:57:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-10T02:51:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-10T02:45:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-10T02:39:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-10T02:33:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-10T02:27:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-10T02:21:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-10T02:15:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-10T02:09:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-10T02:03:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-10T01:57:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2026-03-10T01:51:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-10T01:45:00Z" }, { "point": 61, "time_frame": 360, "timestamp": 
"2026-03-10T01:39:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-10T01:33:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2026-03-10T01:27:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-10T01:21:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-10T01:15:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-10T01:09:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-10T01:03:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-10T00:57:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-10T00:51:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-10T00:45:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-10T00:39:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-10T00:33:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-10T00:27:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-10T00:21:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-10T00:15:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-10T00:09:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-10T00:03:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-09T23:57:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-09T23:51:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-09T23:45:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-09T23:39:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-09T23:33:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-09T23:27:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-09T23:21:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-09T23:15:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-09T23:09:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-09T23:03:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-09T22:57:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-09T22:51:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-09T22:45:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-09T22:39:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-09T22:33:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-09T22:27:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-09T22:21:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-09T22:15:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-09T22:09:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-09T22:03:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-09T21:57:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-09T21:51:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-09T21:45:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-09T21:39:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-09T21:33:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-09T21:27:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-09T21:21:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-09T21:15:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-09T21:09:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-09T21:03:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-09T20:57:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-09T20:51:00Z" }, { "point": 
110, "time_frame": 360, "timestamp": "2026-03-09T20:45:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-09T20:39:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-09T20:33:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-09T20:27:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-09T20:21:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-09T20:15:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-09T20:09:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-09T20:03:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-09T19:57:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-09T19:51:00Z" } ] [ { "time_frame": 360, "timestamp": "2026-03-10T07:45:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-10T07:39:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-10T07:33:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-10T07:27:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-10T07:21:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-10T07:15:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-10T07:09:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2026-03-10T07:03:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2026-03-10T06:57:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-10T06:51:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-10T06:45:00Z" }, { "point": 11, "time_frame": 360, "timestamp": "2026-03-10T06:39:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2026-03-10T06:33:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2026-03-10T06:27:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2026-03-10T06:21:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2026-03-10T06:15:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-10T06:09:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-10T06:03:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-10T05:57:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-10T05:51:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-10T05:45:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-10T05:39:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-10T05:33:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-10T05:27:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-10T05:21:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-10T05:15:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-10T05:09:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-10T05:03:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-10T04:57:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-10T04:51:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-10T04:45:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-10T04:39:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-10T04:33:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-10T04:27:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2026-03-10T04:21:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-10T04:15:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-10T04:09:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2026-03-10T04:03:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-10T03:57:00Z" 
}, { "point": 39, "time_frame": 360, "timestamp": "2026-03-10T03:51:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2026-03-10T03:45:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-10T03:39:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-10T03:33:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-10T03:27:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-10T03:21:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-10T03:15:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-10T03:09:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-10T03:03:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-10T02:57:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-10T02:51:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-10T02:45:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-10T02:39:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-10T02:33:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-10T02:27:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-10T02:21:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-10T02:15:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-10T02:09:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-10T02:03:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-10T01:57:00Z" }, { "point": 59, "time_frame": 360, "timestamp": "2026-03-10T01:51:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-10T01:45:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2026-03-10T01:39:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-10T01:33:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2026-03-10T01:27:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-10T01:21:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-10T01:15:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-10T01:09:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-10T01:03:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-10T00:57:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-10T00:51:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-10T00:45:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-10T00:39:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-10T00:33:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-10T00:27:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-10T00:21:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-10T00:15:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-10T00:09:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-10T00:03:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-09T23:57:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-09T23:51:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-09T23:45:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-09T23:39:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-09T23:33:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-09T23:27:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-09T23:21:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-09T23:15:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-09T23:09:00Z" }, { "point": 87, "time_frame": 360, "timestamp": 
"2026-03-09T23:03:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-09T22:57:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-09T22:51:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-09T22:45:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-09T22:39:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-09T22:33:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-09T22:27:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-09T22:21:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-09T22:15:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-09T22:09:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-09T22:03:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-09T21:57:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-09T21:51:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-09T21:45:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-09T21:39:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-09T21:33:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-09T21:27:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-09T21:21:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-09T21:15:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-09T21:09:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-09T21:03:00Z" }, { "point": 108, "time_frame": 360, "timestamp": "2026-03-09T20:57:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-09T20:51:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-09T20:45:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-09T20:39:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-09T20:33:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-09T20:27:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-09T20:21:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-09T20:15:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-09T20:09:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-09T20:03:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-09T19:57:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-09T19:51:00Z" } ] != \n\u\l\l ]] + rm -f payload.json + nodeList=($(get_node_id_from_pmm)) ++ get_node_id_from_pmm ++ nodeList=() ++ local -a nodeList +++ kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name ++++ mktemp +++ local LAST_OUT=/tmp/tmp.gCTxxmR9Rv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.LkbloMNDOZ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.gCTxxmR9Rv +++ cat /tmp/tmp.LkbloMNDOZ +++ rm /tmp/tmp.gCTxxmR9Rv /tmp/tmp.LkbloMNDOZ +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json +++ jq -r 
.pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TVwXY0uxQP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.l2oYIpIIKb +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.TVwXY0uxQP +++ cat /tmp/tmp.l2oYIpIIKb +++ rm /tmp/tmp.TVwXY0uxQP /tmp/tmp.l2oYIpIIKb +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-cfg-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4qEvNVZjUJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KIv8qLiK5T +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-cfg-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.4qEvNVZjUJ +++ cat /tmp/tmp.KIv8qLiK5T +++ rm /tmp/tmp.4qEvNVZjUJ /tmp/tmp.KIv8qLiK5T +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-cfg-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Wmj2VlWn2Z ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Ds9Wx2UWx7 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-cfg-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Wmj2VlWn2Z +++ cat /tmp/tmp.Ds9Wx2UWx7 +++ rm /tmp/tmp.Wmj2VlWn2Z /tmp/tmp.Ds9Wx2UWx7 +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-mongos-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.CuC0c3xGis ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CTyq8C2LE8 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-mongos-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.CuC0c3xGis +++ cat /tmp/tmp.CTyq8C2LE8 +++ rm /tmp/tmp.CuC0c3xGis /tmp/tmp.CTyq8C2LE8 +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n 
monitoring-2-0-28042 monitoring-mongos-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.BpMPRqTHaW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AjVHSvIB5Y +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-mongos-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.BpMPRqTHaW +++ cat /tmp/tmp.AjVHSvIB5Y +++ rm /tmp/tmp.BpMPRqTHaW /tmp/tmp.AjVHSvIB5Y +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-mongos-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XomWkNK9u7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AR9KsoSOyL +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-mongos-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.XomWkNK9u7 +++ cat /tmp/tmp.AR9KsoSOyL +++ rm /tmp/tmp.XomWkNK9u7 /tmp/tmp.AR9KsoSOyL +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-rs0-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jXSp46DrCs ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vhyVHmmRWr +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-rs0-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.jXSp46DrCs +++ cat /tmp/tmp.vhyVHmmRWr +++ rm /tmp/tmp.jXSp46DrCs /tmp/tmp.vhyVHmmRWr +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-rs0-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9fRYfaqjCp ++++ mktemp +++ local LAST_ERR=/tmp/tmp.LgS2hE6R71 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-rs0-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.9fRYfaqjCp +++ cat /tmp/tmp.LgS2hE6R71 +++ rm /tmp/tmp.9fRYfaqjCp /tmp/tmp.LgS2hE6R71 +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- 
pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-rs0-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.EVbzUqCItc ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1jns9ms7mD +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-rs0-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.EVbzUqCItc +++ cat /tmp/tmp.1jns9ms7mD +++ rm /tmp/tmp.EVbzUqCItc /tmp/tmp.1jns9ms7mD +++ return 0 ++ echo /node_id/2d96570a-dccb-461d-b60b-4d58cd5d5a11 /node_id/78444cab-9cdb-4cb0-be5b-58fbc3af321d /node_id/ddf59699-37f4-4f35-8b94-77c0af91006d /node_id/9e117987-4f46-46c3-8483-b01a4eedc624 /node_id/8a7ddf89-335a-4f1a-9624-c18e388a7318 /node_id/66eafeac-36af-4076-92a8-aee71cb01f1e /node_id/40860f87-38a1-4feb-bea9-3fe4541c1898 /node_id/3decb696-753d-465a-8852-b26735dc588f /node_id/79bd9f9d-078a-4c53-b421-0432fbe33525 + nodeList_from_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/2d96570a-dccb-461d-b60b-4d58cd5d5a11 /node_id/78444cab-9cdb-4cb0-be5b-58fbc3af321d /node_id/ddf59699-37f4-4f35-8b94-77c0af91006d /node_id/9e117987-4f46-46c3-8483-b01a4eedc624 /node_id/8a7ddf89-335a-4f1a-9624-c18e388a7318 /node_id/66eafeac-36af-4076-92a8-aee71cb01f1e /node_id/40860f87-38a1-4feb-bea9-3fe4541c1898 /node_id/3decb696-753d-465a-8852-b26735dc588f /node_id/79bd9f9d-078a-4c53-b421-0432fbe33525 ++ nodeList=('/node_id/2d96570a-dccb-461d-b60b-4d58cd5d5a11' '/node_id/78444cab-9cdb-4cb0-be5b-58fbc3af321d' '/node_id/ddf59699-37f4-4f35-8b94-77c0af91006d' '/node_id/9e117987-4f46-46c3-8483-b01a4eedc624' '/node_id/8a7ddf89-335a-4f1a-9624-c18e388a7318' '/node_id/66eafeac-36af-4076-92a8-aee71cb01f1e' '/node_id/40860f87-38a1-4feb-bea9-3fe4541c1898' '/node_id/3decb696-753d-465a-8852-b26735dc588f' '/node_id/79bd9f9d-078a-4c53-b421-0432fbe33525') ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/2d96570a-dccb-461d-b60b-4d58cd5d5a11 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.MO8OKIAYFF +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.66XNboNGCH ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.MO8OKIAYFF ++++ cat /tmp/tmp.66XNboNGCH ++++ rm /tmp/tmp.MO8OKIAYFF /tmp/tmp.66XNboNGCH ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.hh3BaEooMf +++++ 
mktemp ++++ local LAST_ERR=/tmp/tmp.K2GBtrtWJv ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.hh3BaEooMf ++++ cat /tmp/tmp.K2GBtrtWJv ++++ rm /tmp/tmp.hh3BaEooMf /tmp/tmp.K2GBtrtWJv ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RKkx91rWtQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.P9sirlPugx +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.RKkx91rWtQ +++ cat /tmp/tmp.P9sirlPugx +++ rm /tmp/tmp.RKkx91rWtQ /tmp/tmp.P9sirlPugx +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/78444cab-9cdb-4cb0-be5b-58fbc3af321d +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.kolrrsyuew +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.pPSVNCHheK ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.kolrrsyuew ++++ cat /tmp/tmp.pPSVNCHheK ++++ rm /tmp/tmp.kolrrsyuew /tmp/tmp.pPSVNCHheK ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.qkmWkhNa61 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.mKpmT0OfVU ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.qkmWkhNa61 ++++ cat /tmp/tmp.mKpmT0OfVU ++++ rm /tmp/tmp.qkmWkhNa61 /tmp/tmp.mKpmT0OfVU ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Y0U8aMpr6Z ++++ mktemp +++ local LAST_ERR=/tmp/tmp.m5mpXZP7f7 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin 
--server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Y0U8aMpr6Z +++ cat /tmp/tmp.m5mpXZP7f7 +++ rm /tmp/tmp.Y0U8aMpr6Z /tmp/tmp.m5mpXZP7f7 +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/ddf59699-37f4-4f35-8b94-77c0af91006d +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.vlNJd2mh8P +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.C8E28sbaNL ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.vlNJd2mh8P ++++ cat /tmp/tmp.C8E28sbaNL ++++ rm /tmp/tmp.vlNJd2mh8P /tmp/tmp.C8E28sbaNL ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ZGKEAkc8C5 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.nJRBkwkMek ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.ZGKEAkc8C5 ++++ cat /tmp/tmp.nJRBkwkMek ++++ rm /tmp/tmp.ZGKEAkc8C5 /tmp/tmp.nJRBkwkMek ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QR1cD2ZLVi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NbpM8WXxeo +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.QR1cD2ZLVi +++ cat /tmp/tmp.NbpM8WXxeo +++ rm /tmp/tmp.QR1cD2ZLVi /tmp/tmp.NbpM8WXxeo +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/9e117987-4f46-46c3-8483-b01a4eedc624 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 
'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.gjNuwyjMWD +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.ngEkQQrTFb ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.gjNuwyjMWD ++++ cat /tmp/tmp.ngEkQQrTFb ++++ rm /tmp/tmp.gjNuwyjMWD /tmp/tmp.ngEkQQrTFb ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.lMklLcIoVa +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.rSM6eEO38q ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.lMklLcIoVa ++++ cat /tmp/tmp.rSM6eEO38q ++++ rm /tmp/tmp.lMklLcIoVa /tmp/tmp.rSM6eEO38q ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.x6t1wXZZWv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.IL83cyDDcI +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.x6t1wXZZWv +++ cat /tmp/tmp.IL83cyDDcI +++ rm /tmp/tmp.x6t1wXZZWv /tmp/tmp.IL83cyDDcI +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service +++ grep /node_id/8a7ddf89-335a-4f1a-9624-c18e388a7318 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.zS8QixxcZp +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.BJBd7KdZLc ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.zS8QixxcZp ++++ cat /tmp/tmp.BJBd7KdZLc ++++ rm /tmp/tmp.zS8QixxcZp /tmp/tmp.BJBd7KdZLc ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.tlJpgS6FP2 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.NrS4ZNsz20 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 
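(A minimal sketch of the endpoint lookup the trace above keeps repeating for every node ID; it mirrors the get_pmm_service_ip helper visible in the trace, preferring the LoadBalancer ingress IP and falling back to the hostname. The kubectl_bin retry wrapper and temp-file capture are omitted, so this is a simplified assumption-laden reading of the helper, not its exact implementation.)

```bash
# Sketch only: resolve the PMM server endpoint from the monitoring-service
# LoadBalancer status, preferring the ingress IP and falling back to hostname.
get_pmm_service_ip() {
    local service=${1:-monitoring-service}
    local ip hostname

    ip=$(kubectl get "service/${service}" -o 'jsonpath={.status.loadBalancer.ingress[].ip}')
    if [[ -n ${ip} ]]; then
        echo "${ip}"
        return
    fi

    hostname=$(kubectl get "service/${service}" -o 'jsonpath={.status.loadBalancer.ingress[].hostname}')
    echo "${hostname}"
}
```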
++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.tlJpgS6FP2 ++++ cat /tmp/tmp.NrS4ZNsz20 ++++ rm /tmp/tmp.tlJpgS6FP2 /tmp/tmp.NrS4ZNsz20 ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QET6NywORs ++++ mktemp +++ local LAST_ERR=/tmp/tmp.znMfOsWXnM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.QET6NywORs +++ cat /tmp/tmp.znMfOsWXnM +++ rm /tmp/tmp.QET6NywORs /tmp/tmp.znMfOsWXnM +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/66eafeac-36af-4076-92a8-aee71cb01f1e +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.6UEKsGR8Rk +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.wyKmCEVvZg ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.6UEKsGR8Rk ++++ cat /tmp/tmp.wyKmCEVvZg ++++ rm /tmp/tmp.6UEKsGR8Rk /tmp/tmp.wyKmCEVvZg ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.J1eMhUdags +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.dPFWYkJ30c ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.J1eMhUdags ++++ cat /tmp/tmp.dPFWYkJ30c ++++ rm /tmp/tmp.J1eMhUdags /tmp/tmp.dPFWYkJ30c ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XefNYqj3Ia ++++ mktemp +++ local LAST_ERR=/tmp/tmp.6ZZR8yWxMJ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e 
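(Likewise, a hedged sketch of the verification loop running here: each node_id collected from the pmm-client sidecars via `pmm-admin status --json | jq -r '.pmm_agent_status.node_id'` is looked up in the PMM server inventory on the monitoring-0 pod. The `awk '{print $4}'` column and the CONTAINER_NODE filter come straight from the trace; the real helper additionally accumulates matches into nodeList_from_pmm and routes every kubectl call through the retrying kubectl_bin wrapper.)

```bash
# Sketch only: confirm that every node_id reported by the pmm-client sidecars
# is registered in the PMM server inventory as a CONTAINER_NODE.
does_node_id_exists() {
    local endpoint
    endpoint=$(get_pmm_service_ip monitoring-service)

    for node_id in "$@"; do
        kubectl exec -n "${namespace}" monitoring-0 -- \
            pmm-admin --server-url="https://admin:admin@${endpoint}/" --server-insecure-tls \
            inventory list nodes --node-type=CONTAINER_NODE \
            | grep "${node_id}" | awk '{print $4}'
    done
}
```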
+++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.XefNYqj3Ia +++ cat /tmp/tmp.6ZZR8yWxMJ +++ rm /tmp/tmp.XefNYqj3Ia /tmp/tmp.6ZZR8yWxMJ +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/40860f87-38a1-4feb-bea9-3fe4541c1898 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.CKt08sWFDb +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.fsdO45JTkP ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.CKt08sWFDb ++++ cat /tmp/tmp.fsdO45JTkP ++++ rm /tmp/tmp.CKt08sWFDb /tmp/tmp.fsdO45JTkP ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.CsIxV7xJGn +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.8aoCBoZ4Op ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.CsIxV7xJGn ++++ cat /tmp/tmp.8aoCBoZ4Op ++++ rm /tmp/tmp.CsIxV7xJGn /tmp/tmp.8aoCBoZ4Op ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.06hW9xrz9v ++++ mktemp +++ local LAST_ERR=/tmp/tmp.P94NALD3QL +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.06hW9xrz9v +++ cat /tmp/tmp.P94NALD3QL +++ rm /tmp/tmp.06hW9xrz9v /tmp/tmp.P94NALD3QL +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/3decb696-753d-465a-8852-b26735dc588f +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 
'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.kfQqIe78b8 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.4KF0MWOJmu ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.kfQqIe78b8 ++++ cat /tmp/tmp.4KF0MWOJmu ++++ rm /tmp/tmp.kfQqIe78b8 /tmp/tmp.4KF0MWOJmu ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ogM4UdxDmS +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.BHrbkHBiEj ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.ogM4UdxDmS ++++ cat /tmp/tmp.BHrbkHBiEj ++++ rm /tmp/tmp.ogM4UdxDmS /tmp/tmp.BHrbkHBiEj ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.lEMutFeZPr ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TQI8niLGWu +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.lEMutFeZPr +++ cat /tmp/tmp.TQI8niLGWu +++ rm /tmp/tmp.lEMutFeZPr /tmp/tmp.TQI8niLGWu +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/79bd9f9d-078a-4c53-b421-0432fbe33525 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.i9UL8kfiae +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.tFTIKuRn4Q ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.i9UL8kfiae ++++ cat /tmp/tmp.tFTIKuRn4Q ++++ rm /tmp/tmp.i9UL8kfiae /tmp/tmp.tFTIKuRn4Q ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.hSy6j1RtBA +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.KPJPDEaFzh ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 
'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.hSy6j1RtBA ++++ cat /tmp/tmp.KPJPDEaFzh ++++ rm /tmp/tmp.hSy6j1RtBA /tmp/tmp.KPJPDEaFzh ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.BtcALRnqze ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fsj5hsX6EJ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.BtcALRnqze +++ cat /tmp/tmp.fsj5hsX6EJ +++ rm /tmp/tmp.BtcALRnqze /tmp/tmp.fsj5hsX6EJ +++ return 0 ++ echo /node_id/2d96570a-dccb-461d-b60b-4d58cd5d5a11 /node_id/78444cab-9cdb-4cb0-be5b-58fbc3af321d /node_id/ddf59699-37f4-4f35-8b94-77c0af91006d /node_id/9e117987-4f46-46c3-8483-b01a4eedc624 /node_id/8a7ddf89-335a-4f1a-9624-c18e388a7318 /node_id/66eafeac-36af-4076-92a8-aee71cb01f1e /node_id/40860f87-38a1-4feb-bea9-3fe4541c1898 /node_id/3decb696-753d-465a-8852-b26735dc588f /node_id/79bd9f9d-078a-4c53-b421-0432fbe33525 + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/2d96570a-dccb-461d-b60b-4d58cd5d5a11 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/78444cab-9cdb-4cb0-be5b-58fbc3af321d ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/ddf59699-37f4-4f35-8b94-77c0af91006d ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/9e117987-4f46-46c3-8483-b01a4eedc624 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/8a7ddf89-335a-4f1a-9624-c18e388a7318 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/66eafeac-36af-4076-92a8-aee71cb01f1e ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/40860f87-38a1-4feb-bea9-3fe4541c1898 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/3decb696-753d-465a-8852-b26735dc588f ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/79bd9f9d-078a-4c53-b421-0432fbe33525 ']' + kubectl_bin patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.V9eAHfQEIc ++ mktemp + local LAST_ERR=/tmp/tmp.kN5xeMChg3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.V9eAHfQEIc perconaservermongodb.psmdb.percona.com/monitoring patched + cat /tmp/tmp.kN5xeMChg3 + rm /tmp/tmp.V9eAHfQEIc /tmp/tmp.kN5xeMChg3 + return 0 + wait_for_delete pod/monitoring-mongos-0 + local res=pod/monitoring-mongos-0 + local wait_time=60 + set +o xtrace waiting for pod/monitoring-mongos-0 to be deleted........................Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found + wait_for_delete pod/monitoring-rs0-0 + local res=pod/monitoring-rs0-0 + local wait_time=60 + set +o xtrace waiting for pod/monitoring-rs0-0 to be 
deleted...........Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found + wait_for_delete pod/monitoring-cfg-0 + local res=pod/monitoring-cfg-0 + local wait_time=60 + set +o xtrace waiting for pod/monitoring-cfg-0 to be deleted...........Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found + desc 'check if services are not deleted' + set +o xtrace ----------------------------------------------------------------------------------- check if services are not deleted ----------------------------------------------------------------------------------- + kubectl_bin get svc monitoring-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.IvUiHajpbB ++ mktemp + local LAST_ERR=/tmp/tmp.ujs6OAfSmY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get svc monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IvUiHajpbB NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-rs0 ClusterIP None 27019/TCP 14m + cat /tmp/tmp.ujs6OAfSmY + rm /tmp/tmp.IvUiHajpbB /tmp/tmp.ujs6OAfSmY + return 0 + kubectl_bin get svc monitoring-cfg ++ mktemp + local LAST_OUT=/tmp/tmp.J5JZ0GhGuT ++ mktemp + local LAST_ERR=/tmp/tmp.bQYoNBXLsO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get svc monitoring-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.J5JZ0GhGuT NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-cfg ClusterIP None 27019/TCP 15m + cat /tmp/tmp.bQYoNBXLsO + rm /tmp/tmp.J5JZ0GhGuT /tmp/tmp.bQYoNBXLsO + return 0 + kubectl_bin get svc monitoring-mongos ++ mktemp + local LAST_OUT=/tmp/tmp.OIvhipZkwc ++ mktemp + local LAST_ERR=/tmp/tmp.JHz1tiW8sv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get svc monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OIvhipZkwc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-mongos ClusterIP 34.118.225.58 27019/TCP 15m + cat /tmp/tmp.JHz1tiW8sv + rm /tmp/tmp.OIvhipZkwc /tmp/tmp.JHz1tiW8sv + return 0 + does_node_id_exists_in_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/2d96570a-dccb-461d-b60b-4d58cd5d5a11 /node_id/78444cab-9cdb-4cb0-be5b-58fbc3af321d /node_id/ddf59699-37f4-4f35-8b94-77c0af91006d /node_id/9e117987-4f46-46c3-8483-b01a4eedc624 /node_id/8a7ddf89-335a-4f1a-9624-c18e388a7318 /node_id/66eafeac-36af-4076-92a8-aee71cb01f1e /node_id/40860f87-38a1-4feb-bea9-3fe4541c1898 /node_id/3decb696-753d-465a-8852-b26735dc588f /node_id/79bd9f9d-078a-4c53-b421-0432fbe33525 ++ nodeList=('/node_id/2d96570a-dccb-461d-b60b-4d58cd5d5a11' '/node_id/78444cab-9cdb-4cb0-be5b-58fbc3af321d' '/node_id/ddf59699-37f4-4f35-8b94-77c0af91006d' '/node_id/9e117987-4f46-46c3-8483-b01a4eedc624' '/node_id/8a7ddf89-335a-4f1a-9624-c18e388a7318' '/node_id/66eafeac-36af-4076-92a8-aee71cb01f1e' '/node_id/40860f87-38a1-4feb-bea9-3fe4541c1898' '/node_id/3decb696-753d-465a-8852-b26735dc588f' '/node_id/79bd9f9d-078a-4c53-b421-0432fbe33525') ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in "${nodeList[@]}" ++ 
nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/2d96570a-dccb-461d-b60b-4d58cd5d5a11 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.xSFbhaFwCF +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.HIihQ51Xu7 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.xSFbhaFwCF ++++ cat /tmp/tmp.HIihQ51Xu7 ++++ rm /tmp/tmp.xSFbhaFwCF /tmp/tmp.HIihQ51Xu7 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.pvpaNk8tbG +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.lH3KBhwnnP ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.pvpaNk8tbG ++++ cat /tmp/tmp.lH3KBhwnnP ++++ rm /tmp/tmp.pvpaNk8tbG /tmp/tmp.lH3KBhwnnP ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9ViAsU2F3D ++++ mktemp +++ local LAST_ERR=/tmp/tmp.8vsq0ffxD0 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.9ViAsU2F3D +++ cat /tmp/tmp.8vsq0ffxD0 +++ rm /tmp/tmp.9ViAsU2F3D /tmp/tmp.8vsq0ffxD0 +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/78444cab-9cdb-4cb0-be5b-58fbc3af321d +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.MsfPkY485n +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.L3Q6s8MbJr ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 
2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.MsfPkY485n ++++ cat /tmp/tmp.L3Q6s8MbJr ++++ rm /tmp/tmp.MsfPkY485n /tmp/tmp.L3Q6s8MbJr ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.IsaALQ13pj +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.SJf68kJAKL ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.IsaALQ13pj ++++ cat /tmp/tmp.SJf68kJAKL ++++ rm /tmp/tmp.IsaALQ13pj /tmp/tmp.SJf68kJAKL ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.wE6N2baqBL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ZVKtzBIBlX +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.wE6N2baqBL +++ cat /tmp/tmp.ZVKtzBIBlX +++ rm /tmp/tmp.wE6N2baqBL /tmp/tmp.ZVKtzBIBlX +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/ddf59699-37f4-4f35-8b94-77c0af91006d ++++ get_pmm_service_ip monitoring-service +++ awk '{print $4}' ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.c4seu2JJdb +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.8nveZXpiLb ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.c4seu2JJdb ++++ cat /tmp/tmp.8nveZXpiLb ++++ rm /tmp/tmp.c4seu2JJdb /tmp/tmp.8nveZXpiLb ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.peFz5tYOT9 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.RGOE7RtsE9 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.peFz5tYOT9 ++++ cat /tmp/tmp.RGOE7RtsE9 ++++ rm /tmp/tmp.peFz5tYOT9 /tmp/tmp.RGOE7RtsE9 ++++ return 0 +++ kubectl_bin exec -n 
monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9MCWi7C9XB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7tmX86blD2 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.9MCWi7C9XB +++ cat /tmp/tmp.7tmX86blD2 +++ rm /tmp/tmp.9MCWi7C9XB /tmp/tmp.7tmX86blD2 +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' +++ grep /node_id/9e117987-4f46-46c3-8483-b01a4eedc624 ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.nIFW478Zft +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.3ezLdsj2Cp ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.nIFW478Zft ++++ cat /tmp/tmp.3ezLdsj2Cp ++++ rm /tmp/tmp.nIFW478Zft /tmp/tmp.3ezLdsj2Cp ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ZRy6mkOVjX +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.xt79ms9CiL ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.ZRy6mkOVjX ++++ cat /tmp/tmp.xt79ms9CiL ++++ rm /tmp/tmp.ZRy6mkOVjX /tmp/tmp.xt79ms9CiL ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.yOOfPH6VEt ++++ mktemp +++ local LAST_ERR=/tmp/tmp.o2kCooUCff +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.yOOfPH6VEt +++ cat /tmp/tmp.o2kCooUCff +++ rm /tmp/tmp.yOOfPH6VEt /tmp/tmp.o2kCooUCff +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ 
--server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/8a7ddf89-335a-4f1a-9624-c18e388a7318 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.3Z9WwIXAFW +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.TvlvCWIHMi ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.3Z9WwIXAFW ++++ cat /tmp/tmp.TvlvCWIHMi ++++ rm /tmp/tmp.3Z9WwIXAFW /tmp/tmp.TvlvCWIHMi ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.4Atk5g05Gk +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.zQ793a5LhV ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.4Atk5g05Gk ++++ cat /tmp/tmp.zQ793a5LhV ++++ rm /tmp/tmp.4Atk5g05Gk /tmp/tmp.zQ793a5LhV ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.DcqRA6Bh6l ++++ mktemp +++ local LAST_ERR=/tmp/tmp.F7PvCdkKgn +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.DcqRA6Bh6l +++ cat /tmp/tmp.F7PvCdkKgn +++ rm /tmp/tmp.DcqRA6Bh6l /tmp/tmp.F7PvCdkKgn +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/66eafeac-36af-4076-92a8-aee71cb01f1e +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.wGMip9WW4p +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.YfAkN8KTU0 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 
-a -n 1 ']' ++++ break ++++ cat /tmp/tmp.wGMip9WW4p ++++ cat /tmp/tmp.YfAkN8KTU0 ++++ rm /tmp/tmp.wGMip9WW4p /tmp/tmp.YfAkN8KTU0 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ocBaC1gtUW +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.eWvghTxMXT ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.ocBaC1gtUW ++++ cat /tmp/tmp.eWvghTxMXT ++++ rm /tmp/tmp.ocBaC1gtUW /tmp/tmp.eWvghTxMXT ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.j7tEvYXv2h ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5A0VWt1MBQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.j7tEvYXv2h +++ cat /tmp/tmp.5A0VWt1MBQ +++ rm /tmp/tmp.j7tEvYXv2h /tmp/tmp.5A0VWt1MBQ +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/40860f87-38a1-4feb-bea9-3fe4541c1898 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.5Hj4Yd2OBX +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.7To0vIhC4l ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.5Hj4Yd2OBX ++++ cat /tmp/tmp.7To0vIhC4l ++++ rm /tmp/tmp.5Hj4Yd2OBX /tmp/tmp.7To0vIhC4l ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.jiAvkNIevU +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.hoJCg68Pkc ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.jiAvkNIevU ++++ cat /tmp/tmp.hoJCg68Pkc ++++ rm /tmp/tmp.jiAvkNIevU /tmp/tmp.hoJCg68Pkc ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes 
--node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Tqsf829Nzw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.iDQxRw2hoe +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Tqsf829Nzw +++ cat /tmp/tmp.iDQxRw2hoe +++ rm /tmp/tmp.Tqsf829Nzw /tmp/tmp.iDQxRw2hoe +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/3decb696-753d-465a-8852-b26735dc588f +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.HCsZY1Uw9Z +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.OU3BrF8Ncm ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.HCsZY1Uw9Z ++++ cat /tmp/tmp.OU3BrF8Ncm ++++ rm /tmp/tmp.HCsZY1Uw9Z /tmp/tmp.OU3BrF8Ncm ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.CnUGP9Xesa +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.fJmQjs7GXR ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.CnUGP9Xesa ++++ cat /tmp/tmp.fJmQjs7GXR ++++ rm /tmp/tmp.CnUGP9Xesa /tmp/tmp.fJmQjs7GXR ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.1iId2yJ85Y ++++ mktemp +++ local LAST_ERR=/tmp/tmp.XKpvn17pLD +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.1iId2yJ85Y +++ cat /tmp/tmp.XKpvn17pLD +++ rm /tmp/tmp.1iId2yJ85Y /tmp/tmp.XKpvn17pLD +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep 
/node_id/79bd9f9d-078a-4c53-b421-0432fbe33525 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.c1qCWZMeUs +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.AIzaBP56wR ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.c1qCWZMeUs ++++ cat /tmp/tmp.AIzaBP56wR ++++ rm /tmp/tmp.c1qCWZMeUs /tmp/tmp.AIzaBP56wR ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.wPjrWcjpzi +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.iF7ZsG8Ffr ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.wPjrWcjpzi ++++ cat /tmp/tmp.iF7ZsG8Ffr ++++ rm /tmp/tmp.wPjrWcjpzi /tmp/tmp.iF7ZsG8Ffr ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2wL2DAatJs ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AeLqG1VxVI +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-28042 monitoring-0 -- pmm-admin --server-url=https://admin:admin@136.111.183.188/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.2wL2DAatJs +++ cat /tmp/tmp.AeLqG1VxVI +++ rm /tmp/tmp.2wL2DAatJs /tmp/tmp.AeLqG1VxVI +++ return 0 ++ echo + desc 'check customClusterName for pmm' + set +o xtrace ----------------------------------------------------------------------------------- check customClusterName for pmm ----------------------------------------------------------------------------------- + custom_name=custom-cluster-name + kubectl_bin patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":false}, {"op":"add","path":"/spec/pmm/customClusterName","value":custom-cluster-name}]' ++ mktemp + local LAST_OUT=/tmp/tmp.DtynNzY4eo ++ mktemp + local LAST_ERR=/tmp/tmp.B2HTbsqs8N + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":false}, {"op":"add","path":"/spec/pmm/customClusterName","value":custom-cluster-name}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DtynNzY4eo perconaservermongodb.psmdb.percona.com/monitoring patched + cat /tmp/tmp.B2HTbsqs8N + rm /tmp/tmp.DtynNzY4eo /tmp/tmp.B2HTbsqs8N + return 0 + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local 
rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready...........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready...........OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MIIjFGyxt1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.XaSgUkZOz2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.MIIjFGyxt1 ++ cat /tmp/tmp.XaSgUkZOz2 ++ rm /tmp/tmp.MIIjFGyxt1 /tmp/tmp.XaSgUkZOz2 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready...........OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zRImzNR3Ho +++ mktemp ++ local LAST_ERR=/tmp/tmp.M7XYALlrBT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.zRImzNR3Ho ++ cat /tmp/tmp.M7XYALlrBT ++ rm /tmp/tmp.zRImzNR3Ho /tmp/tmp.M7XYALlrBT ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.C4KEoyFIyv +++ mktemp ++ local LAST_ERR=/tmp/tmp.xHbWHqtDXe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.C4KEoyFIyv ++ cat /tmp/tmp.xHbWHqtDXe ++ rm /tmp/tmp.C4KEoyFIyv /tmp/tmp.xHbWHqtDXe ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.......... 
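The trace that follows resolves the PMM server endpoint from the monitoring-service LoadBalancer and then queries the PMM inventory API to confirm that each MongoDB service was re-registered under the custom cluster name set via spec/pmm/customClusterName. A minimal sketch of that verification, assuming the same admin:admin credentials, service name, and expected value seen in this run (the output path below is illustrative, not the test's actual mktemp directory):

    # resolve the external IP of the PMM service exposed by the LoadBalancer
    endpoint=$(kubectl get service/monitoring-service -o jsonpath='{.status.loadBalancer.ingress[].ip}')
    # list the MongoDB services registered in the PMM inventory
    curl -s -k -d '{"service_type":"MONGODB_SERVICE"}' \
        "https://admin:admin@${endpoint}/v1/inventory/Services/List" > /tmp/pmm_service_list.json
    # the cluster field for each service should match spec.pmm.customClusterName
    jq -r '.mongodb[] | select(.service_name=="monitoring-2-0-28042-monitoring-mongos-0") | .cluster' \
        /tmp/pmm_service_list.json   # expected: custom-cluster-name

The same jq check is repeated for the rs0 and cfg pods in the trace below; a non-matching value would make the test fail at the [[ ... != custom-cluster-name ]] comparison.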
++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5Gz3Vp2Bqy ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AZ0K9URrNx +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.5Gz3Vp2Bqy +++ cat /tmp/tmp.AZ0K9URrNx +++ rm /tmp/tmp.5Gz3Vp2Bqy /tmp/tmp.AZ0K9URrNx +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.M2ZirtEBFL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5VXNf9ggFy +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.M2ZirtEBFL +++ cat /tmp/tmp.5VXNf9ggFy +++ rm /tmp/tmp.M2ZirtEBFL /tmp/tmp.5VXNf9ggFy +++ return 0 ++ local ip=136.111.183.188 ++ '[' -n 136.111.183.188 -a 136.111.183.188 '!=' null ']' ++ echo 136.111.183.188 ++ return + curl -s -k -d '{"service_type":"MONGODB_SERVICE"}' https://admin:admin@136.111.183.188/v1/inventory/Services/List + check_custom_cluster_name monitoring-2-0-28042-monitoring-mongos-0 /tmp/tmp.rwbmdV3g89/pmm_service_list.json + local pod_service_name=monitoring-2-0-28042-monitoring-mongos-0 + local pmm_services_file=/tmp/tmp.rwbmdV3g89/pmm_service_list.json + echo 'Checking monitoring-2-0-28042-monitoring-mongos-0' Checking monitoring-2-0-28042-monitoring-mongos-0 ++ jq -r '.mongodb[] | select(.service_name=="monitoring-2-0-28042-monitoring-mongos-0") | .cluster' /tmp/tmp.rwbmdV3g89/pmm_service_list.json + pmm_service_cluster=custom-cluster-name + [[ custom-cluster-name != custom-cluster-name ]] + check_custom_cluster_name monitoring-2-0-28042-monitoring-rs0-0 /tmp/tmp.rwbmdV3g89/pmm_service_list.json + local pod_service_name=monitoring-2-0-28042-monitoring-rs0-0 + local pmm_services_file=/tmp/tmp.rwbmdV3g89/pmm_service_list.json + echo 'Checking monitoring-2-0-28042-monitoring-rs0-0' Checking monitoring-2-0-28042-monitoring-rs0-0 ++ jq -r '.mongodb[] | select(.service_name=="monitoring-2-0-28042-monitoring-rs0-0") | .cluster' /tmp/tmp.rwbmdV3g89/pmm_service_list.json + pmm_service_cluster=custom-cluster-name + [[ custom-cluster-name != custom-cluster-name ]] + check_custom_cluster_name monitoring-2-0-28042-monitoring-cfg-0 /tmp/tmp.rwbmdV3g89/pmm_service_list.json + local pod_service_name=monitoring-2-0-28042-monitoring-cfg-0 + local pmm_services_file=/tmp/tmp.rwbmdV3g89/pmm_service_list.json + echo 'Checking monitoring-2-0-28042-monitoring-cfg-0' Checking monitoring-2-0-28042-monitoring-cfg-0 ++ jq -r '.mongodb[] | select(.service_name=="monitoring-2-0-28042-monitoring-cfg-0") | .cluster' /tmp/tmp.rwbmdV3g89/pmm_service_list.json + pmm_service_cluster=custom-cluster-name + [[ custom-cluster-name != custom-cluster-name ]] + [[ -n '' ]] ++ kubectl_bin logs monitoring-rs0-0 pmm-client ++ grep -c 'cannot auto discover databases and collections' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yLLhme83cR +++ mktemp ++ local LAST_ERR=/tmp/tmp.d5W03wgmT2 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl 
logs monitoring-rs0-0 pmm-client ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.yLLhme83cR ++ cat /tmp/tmp.d5W03wgmT2 ++ rm /tmp/tmp.yLLhme83cR /tmp/tmp.d5W03wgmT2 ++ return 0 + [[ 0 != 0 ]] + helm uninstall monitoring release "monitoring" uninstalled + destroy monitoring-2-0-28042 + local namespace=monitoring-2-0-28042 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.AWSPZ02l0A +++ mktemp ++ local LAST_ERR=/tmp/tmp.RbtAMlShEq ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AWSPZ02l0A ++ cat /tmp/tmp.RbtAMlShEq No resources found in monitoring-2-0-28042 namespace. ++ rm /tmp/tmp.AWSPZ02l0A /tmp/tmp.RbtAMlShEq ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.rMukou2l8w ++ mktemp + local LAST_ERR=/tmp/tmp.9ntxUy9gQY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rMukou2l8w customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.9ntxUy9gQY + rm /tmp/tmp.rMukou2l8w /tmp/tmp.9ntxUy9gQY + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local 
LAST_OUT=/tmp/tmp.2JINVngUmR ++ mktemp + local LAST_ERR=/tmp/tmp.hh7PnraOz9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2JINVngUmR + cat /tmp/tmp.hh7PnraOz9 + rm /tmp/tmp.2JINVngUmR /tmp/tmp.hh7PnraOz9 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.WwDRli3ldk ++ mktemp + local LAST_ERR=/tmp/tmp.UTRdURW3k3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WwDRli3ldk + cat /tmp/tmp.UTRdURW3k3 + rm /tmp/tmp.WwDRli3ldk /tmp/tmp.UTRdURW3k3 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.OcJTqGW6lq ++ mktemp + local LAST_ERR=/tmp/tmp.zNfoY18WLd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OcJTqGW6lq + cat /tmp/tmp.zNfoY18WLd + rm /tmp/tmp.OcJTqGW6lq /tmp/tmp.zNfoY18WLd + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.myfWD81o2q ++ mktemp + local LAST_ERR=/tmp/tmp.lEbSrFtFt9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2266/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.myfWD81o2q clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.lEbSrFtFt9 + rm /tmp/tmp.myfWD81o2q /tmp/tmp.lEbSrFtFt9 + return 0 + destroy_cert_manager + kubectl_bin delete -f 
https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.p4m8LNgZzq ++ mktemp + local LAST_ERR=/tmp/tmp.Gms8XsqwdK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.p4m8LNgZzq namespace "cert-manager" deleted customresourcedefinition.apiextensions.k8s.io "challenges.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "orders.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificaterequests.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificates.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "clusterissuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted serviceaccount "cert-manager-cainjector" deleted from cert-manager namespace serviceaccount "cert-manager" deleted from cert-manager namespace serviceaccount "cert-manager-webhook" deleted from cert-manager namespace clusterrole.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cluster-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace role.rbac.authorization.k8s.io "cert-manager-tokenrequest" deleted from cert-manager namespace role.rbac.authorization.k8s.io 
"cert-manager-webhook:dynamic-serving" deleted from cert-manager namespace rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace service "cert-manager-cainjector" deleted from cert-manager namespace service "cert-manager" deleted from cert-manager namespace service "cert-manager-webhook" deleted from cert-manager namespace deployment.apps "cert-manager-cainjector" deleted from cert-manager namespace deployment.apps "cert-manager" deleted from cert-manager namespace deployment.apps "cert-manager-webhook" deleted from cert-manager namespace mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted + cat /tmp/tmp.Gms8XsqwdK Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.p4m8LNgZzq namespace "cert-manager" deleted + cat /tmp/tmp.Gms8XsqwdK Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": 
serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ sleep 4
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.p4m8LNgZzq
+ cat /tmp/tmp.Gms8XsqwdK
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ sleep 8
+ cat /tmp/tmp.p4m8LNgZzq
+ cat /tmp/tmp.Gms8XsqwdK
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found
+ rm /tmp/tmp.p4m8LNgZzq /tmp/tmp.Gms8XsqwdK
+ return 1
+ true
+ '[' -n '' ']'
+ '[' -n psmdb-operator ']'
+ kubectl_bin delete --grace-period=0 --force=true namespace monitoring-2-0-28042
+ rm -rf /tmp/tmp.rwbmdV3g89
+ kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.t4kwtw2BJ9
+ desc 'test passed'
+ set +o xtrace
-----------------------------------------------------------------------------------
test passed+ local LAST_OUT=/tmp/tmp.mrNse4qo54
-----------------------------------------------------------------------------------
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.HpdmdLmKWe
+ local exit_status=0
+ local timeout=4
+ local LAST_ERR=/tmp/tmp.JJJUGv1Xjh
+ local exit_status=0
+ local timeout=4
++ seq 0 2
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace psmdb-operator
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete --grace-period=0 --force=true namespace monitoring-2-0-28042
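Editor's note on the retry pattern visible in this trace: every kubectl call in the test goes through a kubectl_bin wrapper that captures stdout and stderr into mktemp files (LAST_OUT / LAST_ERR), retries the command up to three times with an increasing pause (sleep 0, 4 and 8 seconds), then prints both files, removes them and returns the last exit status. That is why the already-deleted cert-manager objects produce three near-identical blocks of NotFound errors before the wrapper gives up with "return 1", which the caller then ignores ("+ true"). A minimal sketch of such a wrapper, reconstructed from this trace alone (the function body below is an assumption for illustration; only the names and the 0/4/8 s back-off are taken from the log):

# Hypothetical reconstruction, not the actual helper from the test framework.
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status i
    local timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" -eq 0 ]; then
            break
        fi
        # show what the failed attempt produced, then back off (0s, 4s, 8s)
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        sleep $((timeout * i))
    done
    # final dump and cleanup, mirroring the cat/rm/return sequence in the trace
    cat "$LAST_OUT"
    cat "$LAST_ERR"
    rm -f "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}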