Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/logs/monitoring-2-0.log
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
+ create_infra monitoring-2-0-711
+ local ns=monitoring-2-0-711
+ [[ 1 == 1 ]]
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.JTU7TbZRLx
++ mktemp
+ local LAST_ERR=/tmp/tmp.WMvcdy7EHS
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.JTU7TbZRLx
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.WMvcdy7EHS
+ rm /tmp/tmp.JTU7TbZRLx /tmp/tmp.WMvcdy7EHS
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml
++ grep -v '\-\-\-'
grep: warning: stray \ before -
grep: warning: stray \ before -
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.602RUWTHKi
++ mktemp
+ local LAST_ERR=/tmp/tmp.0LEzkwdSIQ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.602RUWTHKi
+ cat /tmp/tmp.0LEzkwdSIQ
+ rm /tmp/tmp.602RUWTHKi /tmp/tmp.0LEzkwdSIQ
+ return 0
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
+ grep -v NAMESPACE
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
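Note on the pattern traced above: before waiting for each CRD to disappear, the delete_crd helper clears metadata.finalizers on every remaining custom resource so that deletion cannot hang on a stuck finalizer; the "server doesn't have a resource type" errors are expected on a clean cluster and are swallowed by the trailing ':'. A minimal standalone sketch of that pattern (the crd variable is illustrative; the real helper loops over names from deploy/crd.yaml):

    # Sketch: clear finalizers on all objects of one CRD, then wait for the CRD to go away.
    crd=perconaservermongodbbackups.psmdb.percona.com
    kubectl get "$crd" --all-namespaces -o wide 2>/dev/null \
        | grep -v NAMESPACE \
        | xargs -L 1 sh -xc "kubectl patch $crd -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" \
        || :    # tolerate "doesn't have a resource type" when the CRD is already gone
    kubectl wait --for=delete crd "$crd"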
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.WJ32CLFDTe
++ mktemp
+ local LAST_ERR=/tmp/tmp.eXO6sUZ9U1
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.WJ32CLFDTe
+ cat /tmp/tmp.eXO6sUZ9U1
+ rm /tmp/tmp.WJ32CLFDTe /tmp/tmp.eXO6sUZ9U1
+ return 0
+ for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-')
+ grep -v NAMESPACE
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.qGQ5ewTDOu
++ mktemp
+ local LAST_ERR=/tmp/tmp.RzfhIUzsTb
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.qGQ5ewTDOu
+ cat /tmp/tmp.RzfhIUzsTb
+ rm /tmp/tmp.qGQ5ewTDOu /tmp/tmp.RzfhIUzsTb
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.AIm3HBIQBQ
++ mktemp
+ local LAST_ERR=/tmp/tmp.ixWho3S6fz
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.AIm3HBIQBQ
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.ixWho3S6fz
+ rm /tmp/tmp.AIm3HBIQBQ /tmp/tmp.ixWho3S6fz
+ return 0
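Every kubectl_bin call in this log expands to the same retry wrapper: stdout and stderr are captured into mktemp files, the command is attempted up to three times with growing sleeps, and the captured output is replayed with cat before the temp files are removed. A condensed sketch of that wrapper as it behaves in this trace (the real helper lives in the test suite's shared functions; this is a reconstruction, not a copy):

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" = 0 ]; then
                break
            fi
            # Failed attempt: show what happened, then back off 0s, 4s, 8s
            # (matching the "sleep 0 / sleep 4 / sleep 8" sequence in the trace).
            cat "$LAST_OUT" "$LAST_ERR"
            sleep $((i * timeout))
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }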
+ check_crd_for_deletion PR-2219-d7e802db
+ local git_tag=PR-2219-d7e802db
++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-2219-d7e802db/deploy/crd.yaml
++ yq eval .metadata.name
++ /usr/sbin/sed ':a;N;$!ba;s/\n/ /g'
++ /usr/sbin/sed s/---//g
+ for crd_name in $(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '.metadata.name' | $sed 's/---//g' | $sed ':a;N;$!ba;s/\n/ /g')
++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.mN6O3hvOsU
+++ mktemp
++ local LAST_ERR=/tmp/tmp.vh8N59KiOW
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.mN6O3hvOsU
++ cat /tmp/tmp.vh8N59KiOW
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 0
++ for i in $(seq 0 2)
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.mN6O3hvOsU
++ cat /tmp/tmp.vh8N59KiOW
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 4
++ for i in $(seq 0 2)
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.mN6O3hvOsU
++ cat /tmp/tmp.vh8N59KiOW
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 8
++ cat /tmp/tmp.mN6O3hvOsU
++ cat /tmp/tmp.vh8N59KiOW
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ rm /tmp/tmp.mN6O3hvOsU /tmp/tmp.vh8N59KiOW
++ return 1
+ [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]]
+ '[' -n psmdb-operator ']'
+ create_namespace psmdb-operator
+ local namespace=psmdb-operator
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ tail -n1
++ helm list --all-namespaces --filter chaos-mesh
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ grep validate-auth
++ kubectl get ValidatingWebhookConfiguration
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ awk '{print $1}'
++ kubectl get clusterrolebinding
++ grep chaos-mesh
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
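Each destroy_chaos_mesh deletion above pipes kubectl get through grep/awk and then calls kubectl delete unconditionally, so on a cluster with no chaos-mesh objects the delete runs with an empty name list and kubectl prints "error: resource(s) were provided, but no name was specified", which the trailing ':' swallows. A sketch of the same cleanup with an emptiness guard instead of an ignored error (illustrative; not the helper as shipped):

    # Delete chaos-mesh leftovers of one kind, skipping the call when nothing matches.
    delete_matching() {
        local kind=$1 pattern=$2 names
        names=$(kubectl get "$kind" 2>/dev/null | grep "$pattern" | awk '{print $1}')
        # Only invoke delete when grep actually found something.
        if [ -n "$names" ]; then
            timeout 30 kubectl delete "$kind" $names
        fi
    }

    delete_matching MutatingWebhookConfiguration chaos-mesh
    delete_matching ValidatingWebhookConfiguration chaos-mesh
    delete_matching crd chaos-mesh.org
    delete_matching clusterrolebinding chaos-mesh
    delete_matching clusterrole chaos-mesh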
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace psmdb-operator --ignore-not-found
+ kubectl_bin get ns
++ mktemp
+ grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME'
+ awk '{print$1}'
++ mktemp
+ xargs kubectl delete ns
+ local LAST_OUT=/tmp/tmp.hf6aHc3Y88
++ mktemp
+ local LAST_OUT=/tmp/tmp.UTGRm1c3G4
+ local LAST_ERR=/tmp/tmp.y9LAryEKyX
+ local exit_status=0
+ local timeout=4
++ seq 0 2
++ mktemp
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete namespace psmdb-operator --ignore-not-found
+ local LAST_ERR=/tmp/tmp.NEZ6YxodES
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl get ns
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.UTGRm1c3G4
+ cat /tmp/tmp.NEZ6YxodES
+ rm /tmp/tmp.UTGRm1c3G4 /tmp/tmp.NEZ6YxodES
+ return 0
namespace "cert-manager" deleted
namespace "monitoring-2-0-24944" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.hf6aHc3Y88
namespace "psmdb-operator" deleted
+ cat /tmp/tmp.y9LAryEKyX
+ rm /tmp/tmp.hf6aHc3Y88 /tmp/tmp.y9LAryEKyX
+ return 0
+ kubectl_bin wait --for=delete namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.Vd3zC8WgdB
++ mktemp
+ local LAST_ERR=/tmp/tmp.wHAeISlZyO
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Vd3zC8WgdB
+ cat /tmp/tmp.wHAeISlZyO
+ rm /tmp/tmp.Vd3zC8WgdB /tmp/tmp.wHAeISlZyO
+ return 0
+ desc 'create namespace psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.AlJwZmqhwg
++ mktemp
+ local LAST_ERR=/tmp/tmp.tCKqO44GVA
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl create namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.AlJwZmqhwg
namespace/psmdb-operator created
+ cat /tmp/tmp.tCKqO44GVA
+ rm /tmp/tmp.AlJwZmqhwg /tmp/tmp.tCKqO44GVA
+ return 0
+ set_kube_ctx psmdb-operator
+ local namespace=psmdb-operator
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.U34Mzjh3z3
+++ mktemp
++ local LAST_ERR=/tmp/tmp.4b9mf5osBC
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.U34Mzjh3z3
++ cat /tmp/tmp.4b9mf5osBC
++ rm /tmp/tmp.U34Mzjh3z3 /tmp/tmp.4b9mf5osBC
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-d7e802db-7-cluster1 --namespace=psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.Hr5uyLxJJe
++ mktemp
+ local LAST_ERR=/tmp/tmp.txJ21ZYgWB
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-d7e802db-7-cluster1 --namespace=psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Hr5uyLxJJe
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-d7e802db-7-cluster1" modified.
+ cat /tmp/tmp.txJ21ZYgWB
+ rm /tmp/tmp.Hr5uyLxJJe /tmp/tmp.txJ21ZYgWB
+ return 0
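The interleaved trace above is create_namespace running two kubectl_bin pipelines concurrently: one deletes the target namespace, the other lists all namespaces, filters out system and protected ones, and feeds the remainder to kubectl delete ns. The filter pipeline, pulled out of the trace as a standalone sketch (the grep -E -v pattern is taken verbatim; --no-run-if-empty is added here to avoid the empty-input kubectl error this run tolerates):

    # Delete every namespace except system ones and the operator's own.
    kubectl get ns \
        | grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME' \
        | awk '{print $1}' \
        | xargs --no-run-if-empty kubectl delete ns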
+ deploy_operator
+ desc 'start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2219-d7e802db'
+ set +o xtrace
-----------------------------------------------------------------------------------
start PSMDB operator: docker.io/perconalab/percona-server-mongodb-operator:PR-2219-d7e802db
-----------------------------------------------------------------------------------
+ local cr_file
+ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/conf/crd.yaml ']'
+ cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml
+ kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.6ZzzoaKvlZ
++ mktemp
+ local LAST_ERR=/tmp/tmp.dSraI8QDwo
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.6ZzzoaKvlZ
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied
+ cat /tmp/tmp.dSraI8QDwo
+ rm /tmp/tmp.6ZzzoaKvlZ /tmp/tmp.dSraI8QDwo
+ return 0
+ '[' -n psmdb-operator ']'
+ apply_rbac cw-rbac
+ local operator_namespace=psmdb-operator
+ local rbac=cw-rbac
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-rbac.yaml
+ sed -e 's^namespace: .*^namespace: psmdb-operator^'
+ kubectl_bin apply -n psmdb-operator -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.6yl3O16dAs
++ mktemp
+ local LAST_ERR=/tmp/tmp.iTDA3vzljE
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.6yl3O16dAs
clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created
serviceaccount/percona-server-mongodb-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created
+ cat /tmp/tmp.iTDA3vzljE
+ rm /tmp/tmp.6yl3O16dAs /tmp/tmp.iTDA3vzljE
+ return 0
+ yq eval '
    (.spec.template.spec.containers[].image = "docker.io/perconalab/percona-server-mongodb-operator:PR-2219-d7e802db") |
    ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") |
    ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-operator.yaml
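deploy_operator never edits cw-operator.yaml on disk; it rewrites the manifest in-stream with yq and pipes the result to kubectl apply. The same transformation as a minimal standalone pipeline (image tag is the PR build under test; the env-var selectors match any container env entry named DISABLE_TELEMETRY or LOG_LEVEL):

    # Sketch: pin the operator image and flip two env vars, applying the result
    # without touching the checked-in manifest.
    yq eval '
      (.spec.template.spec.containers[].image = "docker.io/perconalab/percona-server-mongodb-operator:PR-2219-d7e802db") |
      ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") |
      ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")
    ' deploy/cw-operator.yaml | kubectl apply -n psmdb-operator -f -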
+ kubectl_bin apply -n psmdb-operator -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.FiVU1COmM3
++ mktemp
+ local LAST_ERR=/tmp/tmp.u4tTUd0gBU
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.FiVU1COmM3
deployment.apps/percona-server-mongodb-operator created
+ cat /tmp/tmp.u4tTUd0gBU
+ rm /tmp/tmp.FiVU1COmM3 /tmp/tmp.u4tTUd0gBU
+ return 0
+ sleep 20
++ get_operator_pod
++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.A8UUaKJQ7o
+++ mktemp
++ local LAST_ERR=/tmp/tmp.F4P7tio4K6
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.A8UUaKJQ7o
++ cat /tmp/tmp.F4P7tio4K6
++ rm /tmp/tmp.A8UUaKJQ7o /tmp/tmp.F4P7tio4K6
++ return 0
+ wait_operator_pod percona-server-mongodb-operator-76758dcf9-fgjjk
+ local pod=percona-server-mongodb-operator-76758dcf9-fgjjk
+ set +o xtrace
waiting for pod/percona-server-mongodb-operator-76758dcf9-fgjjk to be ready.OK
+ echo 'Print operator info from log'
Print operator info from log
+ grep 'Manager starting up'
++ get_operator_pod
++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.vuQOGQP7QL
+++ mktemp
++ local LAST_ERR=/tmp/tmp.cgf6wGP8rZ
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.vuQOGQP7QL
++ cat /tmp/tmp.cgf6wGP8rZ
++ rm /tmp/tmp.vuQOGQP7QL /tmp/tmp.cgf6wGP8rZ
++ return 0
+ kubectl_bin logs -n psmdb-operator percona-server-mongodb-operator-76758dcf9-fgjjk
++ mktemp
+ local LAST_OUT=/tmp/tmp.cop4Ln8GTb
++ mktemp
+ local LAST_ERR=/tmp/tmp.uZ5XE4hbru
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl logs -n psmdb-operator percona-server-mongodb-operator-76758dcf9-fgjjk
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.cop4Ln8GTb
+ cat /tmp/tmp.uZ5XE4hbru
+ rm /tmp/tmp.cop4Ln8GTb /tmp/tmp.uZ5XE4hbru
+ return 0
2026-03-10T22:30:16.342Z INFO setup Manager starting up {"gitCommit": "d7e802db10c9b2b2028f56c7b5227cb276fe5878", "gitBranch": "PR-2219-d7e802db", "buildTime": "", "goVersion": "go1.25.8", "os": "linux", "arch": "amd64"}
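The deployment is discovered back by label rather than by name: get_operator_pod asks for the pod matching name=percona-server-mongodb-operator with a jsonpath of its metadata.name, and the startup check then greps the pod log for the "Manager starting up" line. Condensed to its essence (the real wait_operator_pod polls with its own dotted loop; kubectl wait is used here as a compact stand-in):

    # Find the operator pod by label, wait for readiness, confirm startup in its log.
    ns=psmdb-operator
    pod=$(kubectl get pods -n "$ns" \
        --selector=name=percona-server-mongodb-operator \
        -o 'jsonpath={.items[].metadata.name}')
    kubectl wait -n "$ns" --for=condition=Ready "pod/$pod" --timeout=120s
    kubectl logs -n "$ns" "$pod" | grep 'Manager starting up'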
+ create_namespace monitoring-2-0-711
+ local namespace=monitoring-2-0-711
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ sed s/NAMESPACE//
++ awk '-F ' '{print $2}'
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ awk '{print $1}'
++ grep chaos-mesh
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ awk '{print $1}'
++ grep chaos-mesh
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ awk '{print $1}'
++ grep chaos-mesh
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ kubectl_bin get ns
+ grep -E -v '^kube-|^default|Terminating|psmdb-operator|openshift|^gke-|^gmp-|^NAME'
+ '[' -n '' ']'
+ desc 'cleaned up old namespaces monitoring-2-0-711'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces monitoring-2-0-711
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace monitoring-2-0-711 --ignore-not-found
+ awk '{print$1}'
++ mktemp
+ xargs kubectl delete ns
++ mktemp
+ local LAST_OUT=/tmp/tmp.CKRQFSbTzF
+ local LAST_OUT=/tmp/tmp.rqAXBdNnCb
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.m82iaRMVNY
+ local exit_status=0
+ local timeout=4
+ local LAST_ERR=/tmp/tmp.yfZWfD4WOE
+ local exit_status=0
+ local timeout=4
++ seq 0 2
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl delete namespace monitoring-2-0-711 --ignore-not-found
+ for i in $(seq 0 2)
+ set +e
+ kubectl get ns
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.CKRQFSbTzF
+ cat /tmp/tmp.yfZWfD4WOE
+ rm /tmp/tmp.CKRQFSbTzF /tmp/tmp.yfZWfD4WOE
+ return 0
error: resource(s) were provided, but no name was specified
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.rqAXBdNnCb
+ cat /tmp/tmp.m82iaRMVNY
+ rm /tmp/tmp.rqAXBdNnCb /tmp/tmp.m82iaRMVNY
+ return 0
+ kubectl_bin wait --for=delete namespace monitoring-2-0-711
++ mktemp
+ local LAST_OUT=/tmp/tmp.NmpZ9WDia1
++ mktemp
+ local LAST_ERR=/tmp/tmp.PqhP28eo1b
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl wait --for=delete namespace monitoring-2-0-711
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.NmpZ9WDia1
+ cat /tmp/tmp.PqhP28eo1b
+ rm /tmp/tmp.NmpZ9WDia1 /tmp/tmp.PqhP28eo1b
+ return 0
+ desc 'create namespace monitoring-2-0-711'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace monitoring-2-0-711
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace monitoring-2-0-711
++ mktemp
+ local LAST_OUT=/tmp/tmp.er45bWDxqP
++ mktemp
+ local LAST_ERR=/tmp/tmp.YnSA0o57Y9
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl create namespace monitoring-2-0-711
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.er45bWDxqP
namespace/monitoring-2-0-711 created
+ cat /tmp/tmp.YnSA0o57Y9
+ rm /tmp/tmp.er45bWDxqP /tmp/tmp.YnSA0o57Y9
+ return 0
+ set_kube_ctx monitoring-2-0-711
+ local namespace=monitoring-2-0-711
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.D0CG08mJoS
+++ mktemp
++ local LAST_ERR=/tmp/tmp.bX1TwY552Y
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.D0CG08mJoS
++ cat /tmp/tmp.bX1TwY552Y
++ rm /tmp/tmp.D0CG08mJoS /tmp/tmp.bX1TwY552Y
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-d7e802db-7-cluster1 --namespace=monitoring-2-0-711
++ mktemp
+ local LAST_OUT=/tmp/tmp.P3aZPCEozV
++ mktemp
+ local LAST_ERR=/tmp/tmp.eBusjsaQRw
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-d7e802db-7-cluster1 --namespace=monitoring-2-0-711
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.P3aZPCEozV
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-2219-d7e802db-7-cluster1" modified.
+ cat /tmp/tmp.eBusjsaQRw
+ rm /tmp/tmp.P3aZPCEozV /tmp/tmp.eBusjsaQRw
+ return 0
+ deploy_cert_manager
+ desc 'deploy cert manager'
+ set +o xtrace
-----------------------------------------------------------------------------------
deploy cert manager
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace cert-manager
++ mktemp
+ local LAST_OUT=/tmp/tmp.KsBqXZF8Cn
++ mktemp
+ local LAST_ERR=/tmp/tmp.h2Th8ubGFO
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl create namespace cert-manager
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.KsBqXZF8Cn
namespace/cert-manager created
+ cat /tmp/tmp.h2Th8ubGFO
+ rm /tmp/tmp.KsBqXZF8Cn /tmp/tmp.h2Th8ubGFO
+ return 0
+ kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true
++ mktemp
+ local LAST_OUT=/tmp/tmp.KJgNfjqnxp
++ mktemp
+ local LAST_ERR=/tmp/tmp.e9ZoHlNHHU
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.KJgNfjqnxp
namespace/cert-manager labeled
+ cat /tmp/tmp.e9ZoHlNHHU
+ rm /tmp/tmp.KJgNfjqnxp /tmp/tmp.e9ZoHlNHHU
+ return 0
+ kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml --validate=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.RSuTGQTWUe
++ mktemp
+ local LAST_ERR=/tmp/tmp.NQLRD11Kfe
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml --validate=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.RSuTGQTWUe
namespace/cert-manager configured
customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged
serviceaccount/cert-manager-cainjector created
serviceaccount/cert-manager created
serviceaccount/cert-manager-webhook created
clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged
role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged
role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged
role.rbac.authorization.k8s.io/cert-manager-tokenrequest created
role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created
rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged
rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged
rolebinding.rbac.authorization.k8s.io/cert-manager-tokenrequest created
rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created
service/cert-manager-cainjector created
service/cert-manager created
service/cert-manager-webhook created
deployment.apps/cert-manager-cainjector created
deployment.apps/cert-manager created
deployment.apps/cert-manager-webhook created
mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured
validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured
+ cat /tmp/tmp.NQLRD11Kfe
Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
+ rm /tmp/tmp.RSuTGQTWUe /tmp/tmp.NQLRD11Kfe
+ return 0
+ kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready
++ mktemp
+ local LAST_OUT=/tmp/tmp.V9Ur2oZPSR
++ mktemp
+ local LAST_ERR=/tmp/tmp.u2QHa1Atrw
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.V9Ur2oZPSR
pod/cert-manager-559d798845-kghnz condition met
pod/cert-manager-cainjector-64958d9c7c-29v6w condition met
pod/cert-manager-webhook-7fb6f99b56-qzflg condition met
+ cat /tmp/tmp.u2QHa1Atrw
+ rm /tmp/tmp.V9Ur2oZPSR /tmp/tmp.u2QHa1Atrw
+ return 0
+ sleep 120
+ desc 'install PMM Server'
+ set +o xtrace
-----------------------------------------------------------------------------------
install PMM Server
-----------------------------------------------------------------------------------
+ deploy_pmm_server
+ helm uninstall monitoring
Error: uninstall: Release not loaded: monitoring: release: not found
+ :
+ helm repo remove stable
"stable" has been removed from your repositories
+ helm repo add stable https://charts.helm.sh/stable
"stable" has been added to your repositories
+ [[ -n '' ]]
+ retry 10 60 helm install monitoring --set imageTag=dev-latest --set imageRepo=docker.io/perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz
+ local max=10
+ local delay=60
+ shift 2
+ local n=1
+ helm install monitoring --set imageTag=dev-latest --set imageRepo=docker.io/perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz
Error: INSTALLATION FAILED: no cached repo found. (try 'helm repo update'): open /root/.cache/helm/repository/minio-index.yaml: no such file or directory
+ [[ 1 -ge 10 ]]
+ (( n++ ))
+ sleep 60
+ helm install monitoring --set imageTag=dev-latest --set imageRepo=docker.io/perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz
Error: INSTALLATION FAILED: no cached repo found. (try 'helm repo update'): open /root/.cache/helm/repository/minio-index.yaml: no such file or directory
+ [[ 2 -ge 10 ]]
+ (( n++ ))
+ sleep 60
+ helm install monitoring --set imageTag=dev-latest --set imageRepo=docker.io/perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz
Error: INSTALLATION FAILED: no cached repo found. (try 'helm repo update'): open /root/.cache/helm/repository/minio-index.yaml: no such file or directory
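retry 10 60 helm install ... is a generic helper: up to max attempts with a fixed delay between them, giving up only after the limit. Reconstructed from the max/delay/n variables visible in the trace (a faithful sketch, not a copy of the shared function); the "no cached repo found" failures are environmental and clear by the fourth attempt below:

    # Sketch of the retry helper as traced: retry <max> <delay> <command...>
    retry() {
        local max=$1
        local delay=$2
        shift 2
        local n=1
        # Re-run the command until it succeeds or max attempts are exhausted.
        until "$@"; do
            if [[ $n -ge $max ]]; then
                echo "retry: giving up after $max attempts" >&2
                return 1
            fi
            (( n++ ))
            sleep "$delay"
        done
    }

    retry 10 60 helm install monitoring \
        --set imageTag=dev-latest \
        --set imageRepo=docker.io/perconalab/pmm-server \
        https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz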
+ [[ 3 -ge 10 ]]
+ (( n++ ))
+ sleep 60
+ helm install monitoring --set imageTag=dev-latest --set imageRepo=docker.io/perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz
NAME: monitoring
LAST DEPLOYED: Tue Mar 10 22:36:19 2026
NAMESPACE: monitoring-2-0-711
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster:
endpoint: https://monitoring-service.monitoring-2-0-711.svc.cluster.local:443
login: admin
password: admin
+ sleep 40
+ kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
++ mktemp
+ local LAST_OUT=/tmp/tmp.ETZ5lYBcMy
++ mktemp
+ local LAST_ERR=/tmp/tmp.6Ilav5MbFN
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.ETZ5lYBcMy
+ cat /tmp/tmp.6Ilav5MbFN
error: Internal error occurred: unable to upgrade connection: container not found ("monitoring")
+ sleep 0
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.ETZ5lYBcMy
+ cat /tmp/tmp.6Ilav5MbFN
error: Internal error occurred: unable to upgrade connection: container not found ("monitoring")
+ sleep 4
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
+ exit_status=1
+ set -e
+ '[' 1 '!=' 0 -a -n 1 ']'
+ cat /tmp/tmp.ETZ5lYBcMy
+ cat /tmp/tmp.6Ilav5MbFN
error: Internal error occurred: unable to upgrade connection: container not found ("monitoring")
+ sleep 8
+ cat /tmp/tmp.ETZ5lYBcMy
+ cat /tmp/tmp.6Ilav5MbFN
error: Internal error occurred: unable to upgrade connection: container not found ("monitoring")
+ rm /tmp/tmp.ETZ5lYBcMy /tmp/tmp.6Ilav5MbFN
+ return 1
+ echo 'Retry 0'
Retry 0
+ sleep 5
+ let retry+=1
+ '[' 1 -ge 30 ']'
+ kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
++ mktemp
+ local LAST_OUT=/tmp/tmp.8dVaZq6nze
++ mktemp
+ local LAST_ERR=/tmp/tmp.zZpBlmBdmH
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.8dVaZq6nze
+ cat /tmp/tmp.zZpBlmBdmH
+ rm /tmp/tmp.8dVaZq6nze /tmp/tmp.zZpBlmBdmH
+ return 0
+ cluster=monitoring
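PMM readiness is probed from inside the monitoring-0 pod: the check lists /proc/*/exe links and greps for a postgres process, retrying (an outer loop of up to 30 passes 5s apart, on top of kubectl_bin's own three tries) until the server's database is up. A compact sketch of that outer poll (the 30/5 limits match the "1 -ge 30" and "sleep 5" traced above):

    # Wait until a postgres process is visible inside the PMM server pod.
    retry=0
    until kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null | grep postgres >/dev/null'; do
        echo "Retry $retry"
        sleep 5
        retry=$((retry + 1))
        if [ $retry -ge 30 ]; then
            echo "PMM server did not start in time" >&2
            exit 1
        fi
    done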
+ desc 'create secrets and start client'
+ set +o xtrace
-----------------------------------------------------------------------------------
create secrets and start client
-----------------------------------------------------------------------------------
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/conf/secrets.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.W1hysxUmGV
++ mktemp
+ local LAST_ERR=/tmp/tmp.9o3wHzqGsk
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/conf/secrets.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.W1hysxUmGV
secret/some-users created
secret/some-users unchanged
+ cat /tmp/tmp.9o3wHzqGsk
+ rm /tmp/tmp.W1hysxUmGV /tmp/tmp.9o3wHzqGsk
+ return 0
+ yq '.spec.template.spec.volumes[0].secret.secretName="monitoring-ssl"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/conf/client_with_tls.yml
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.EDcJvaQw5k
++ mktemp
+ local LAST_ERR=/tmp/tmp.rGl0DI21Bd
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.EDcJvaQw5k
deployment.apps/psmdb-client created
+ cat /tmp/tmp.rGl0DI21Bd
+ rm /tmp/tmp.EDcJvaQw5k /tmp/tmp.rGl0DI21Bd
+ return 0
+ sleep 90
+ desc 'create first PSMDB cluster monitoring'
+ set +o xtrace
-----------------------------------------------------------------------------------
create first PSMDB cluster monitoring
-----------------------------------------------------------------------------------
+ apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml
+ kubectl_bin apply -f -
+ yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"'
+ yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"'
++ mktemp
+ /usr/sbin/sed -e s/NAME_SPACE/monitoring-2-0-711/g
+ yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2219-d7e802db"'
+ yq eval '.spec.upgradeOptions.apply="Never"'
+ yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"'
+ local LAST_OUT=/tmp/tmp.DF7sxg96Jf
++ mktemp
+ local LAST_ERR=/tmp/tmp.PVK1b9gEjc
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.DF7sxg96Jf
perconaservermongodb.psmdb.percona.com/monitoring created
+ cat /tmp/tmp.PVK1b9gEjc
+ rm /tmp/tmp.DF7sxg96Jf /tmp/tmp.PVK1b9gEjc
+ return 0
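apply_cluster builds the custom resource by streaming the test's monitoring-rs0.yml through a chain of yq edits (each image is set only where the spec has the matching field, upgrades are pinned to Never) and a sed that substitutes the generated namespace for the NAME_SPACE placeholder, then applies the result. The same chain, untangled from the parallel trace lines above into a single sequential pipeline:

    # Sketch: render the test CR with per-field image overrides and apply it.
    cat e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml \
        | yq eval '(.spec | select(.image == null)).image = "docker.io/perconalab/percona-server-mongodb-operator:main-mongod8.0"' \
        | yq eval '(.spec | select(has("initImage"))).initImage = "docker.io/perconalab/percona-server-mongodb-operator:PR-2219-d7e802db"' \
        | yq eval '(.spec | select(has("pmm"))).pmm.image = "docker.io/percona/pmm-client:2.44.1-1"' \
        | yq eval '(.spec | select(has("backup"))).backup.image = "docker.io/perconalab/percona-server-mongodb-operator:main-backup"' \
        | yq eval '.spec.upgradeOptions.apply="Never"' \
        | sed -e 's/NAME_SPACE/monitoring-2-0-711/g' \
        | kubectl apply -f -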
+ wait_for_running monitoring-rs0 3
+ local name=monitoring-rs0
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=rs0
+ local cluster_name=monitoring
++ seq 0 2
+ for i in $(seq 0 $last_pod)
+ [[ 0 -eq 2 ]]
+ wait_pod monitoring-rs0-0
+ local pod=monitoring-rs0-0
+ set +o xtrace
waiting for pod/monitoring-rs0-0 to be ready.............OK
+ for i in $(seq 0 $last_pod)
+ [[ 1 -eq 2 ]]
+ wait_pod monitoring-rs0-1
+ local pod=monitoring-rs0-1
+ set +o xtrace
waiting for pod/monitoring-rs0-1 to be ready............OK
+ for i in $(seq 0 $last_pod)
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.uzhtQWNlfr
+++ mktemp
++ local LAST_ERR=/tmp/tmp.sYB1Q74y5A
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.uzhtQWNlfr
++ cat /tmp/tmp.sYB1Q74y5A
++ rm /tmp/tmp.uzhtQWNlfr /tmp/tmp.sYB1Q74y5A
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod monitoring-rs0-2
+ local pod=monitoring-rs0-2
+ set +o xtrace
waiting for pod/monitoring-rs0-2 to be ready............OK
++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.R6d5bJjGko
+++ mktemp
++ local LAST_ERR=/tmp/tmp.bjk0w1vrpn
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.R6d5bJjGko
++ cat /tmp/tmp.bjk0w1vrpn
++ rm /tmp/tmp.R6d5bJjGko /tmp/tmp.bjk0w1vrpn
++ return 0
+ [[ '' == \t\r\u\e ]]
++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.UNtBIikGmK
+++ mktemp
++ local LAST_ERR=/tmp/tmp.vZ2Fjk61wn
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.UNtBIikGmK
++ cat /tmp/tmp.vZ2Fjk61wn
++ rm /tmp/tmp.UNtBIikGmK /tmp/tmp.vZ2Fjk61wn
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness...................
+ desc 'check if pmm-client container is not enabled'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if pmm-client container is not enabled
-----------------------------------------------------------------------------------
+ compare_kubectl statefulset/monitoring-rs0 -no-pmm
+ local resource=statefulset/monitoring-rs0
+ local postfix=-no-pmm
+ local skip_generation_check=
+ local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml
+ local new_result=/tmp/tmp.jvfWgN3tn4/statefulset_monitoring-rs0.yml
+ '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm-oc.yml ']'
| select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-711", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.ALR47IXw3s ++ mktemp + local LAST_ERR=/tmp/tmp.xZttV3LYNr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ALR47IXw3s + cat /tmp/tmp.xZttV3LYNr + rm /tmp/tmp.ALR47IXw3s /tmp/tmp.xZttV3LYNr + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.jvfWgN3tn4/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.jvfWgN3tn4/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.jvfWgN3tn4/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml /tmp/tmp.jvfWgN3tn4/statefulset_monitoring-rs0.yml + log 'compare_kubectl: statefulset/monitoring-rs0 OK' + set +o xtrace [2026-03-10T22:41:15+0000] compare_kubectl: statefulset/monitoring-rs0 OK + sleep 10 + custom_port=27019 + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-711 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-711 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ awk -F: '{print $2}' ++ echo .svc.cluster.local + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ml6telLpH5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.gjwLL203y5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ 
+ custom_port=27019
+ run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-711 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019
+ local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})'
+ local uri=userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-711
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local port=27019
+ local mongo_bin=mongo
++ awk -F: '{print $2}'
++ echo .svc.cluster.local
+ suffix_port=
+ [[ -z '' ]]
+ suffix=.svc.cluster.local:27019
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.ml6telLpH5
+++ mktemp
++ local LAST_ERR=/tmp/tmp.gjwLL203y5
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.ml6telLpH5
++ cat /tmp/tmp.gjwLL203y5
++ rm /tmp/tmp.ml6telLpH5 /tmp/tmp.gjwLL203y5
++ return 0
+ local client_container=psmdb-client-699f458f75-9rrb2
+ kubectl_bin exec psmdb-client-699f458f75-9rrb2 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-711.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
++ mktemp
+ local LAST_OUT=/tmp/tmp.Y7aTuU8bAh
++ mktemp
+ local LAST_ERR=/tmp/tmp.2ZdenCp9yb
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-699f458f75-9rrb2 -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-711.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Y7aTuU8bAh
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://monitoring-mongos.monitoring-2-0-711.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb
{"t":{"$date":"2026-03-10T22:41:28.300Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"}
Implicit session: session { "id" : UUID("e55d53aa-5e4a-4750-bf8c-373a727232a5") }
Percona Server for MongoDB server version: v8.0.19-7
WARNING: shell and server versions do not match
Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] }
bye
+ cat /tmp/tmp.2ZdenCp9yb
+ rm /tmp/tmp.Y7aTuU8bAh /tmp/tmp.2ZdenCp9yb
+ return 0
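run_mongos wraps every shell-level MongoDB call in this test: it resolves the single psmdb-client pod by label, then execs mongo inside it with the command piped via printf, building the URI from user:pass@host, the .svc.cluster.local suffix, the custom port (27019 here), and the TLS flags. A reduced sketch of the helper's shape (argument handling simplified relative to the real function, which also takes driver and suffix parameters; the trace above shows the full expansion):

    # Sketch: run a mongo shell snippet against the mongos service from the client pod.
    run_mongos() {
        local command=$1       # e.g. 'sh.enableSharding("myApp")'
        local uri=$2           # e.g. user:pass@monitoring-mongos.<namespace>
        local mongo_flag=$3    # extra flags such as the TLS options
        local port=${4:-27017}
        local client_container
        client_container=$(kubectl get pods --selector=name=psmdb-client \
            -o 'jsonpath={.items[].metadata.name}')
        kubectl exec "$client_container" -- bash -c \
            "printf '%s\n' '$command' | mongo mongodb://$uri.svc.cluster.local:$port/admin $mongo_flag"
    }

    run_mongos 'sh.enableSharding("myApp")' \
        'clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-711' \
        '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019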
+ run_mongos 'sh.enableSharding("myApp")' clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-711 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019
+ local 'command=sh.enableSharding("myApp")'
+ local uri=clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-711
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local port=27019
+ local mongo_bin=mongo
++ echo .svc.cluster.local
++ awk -F: '{print $2}'
+ suffix_port=
+ [[ -z '' ]]
+ suffix=.svc.cluster.local:27019
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.WrCGPQRRD2
+++ mktemp
++ local LAST_ERR=/tmp/tmp.XDzob1Rdyv
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.WrCGPQRRD2
++ cat /tmp/tmp.XDzob1Rdyv
++ rm /tmp/tmp.WrCGPQRRD2 /tmp/tmp.XDzob1Rdyv
++ return 0
+ local client_container=psmdb-client-699f458f75-9rrb2
+ kubectl_bin exec psmdb-client-699f458f75-9rrb2 -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-711.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
++ mktemp
+ local LAST_OUT=/tmp/tmp.nByDbvi97t
++ mktemp
+ local LAST_ERR=/tmp/tmp.33dZ5J1bu7
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-699f458f75-9rrb2 -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-711.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.nByDbvi97t
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://monitoring-mongos.monitoring-2-0-711.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb
{"t":{"$date":"2026-03-10T22:41:30.967Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"}
Implicit session: session { "id" : UUID("b40d4383-7b6f-40d4-b539-8af886243b62") }
Percona Server for MongoDB server version: v8.0.19-7
WARNING: shell and server versions do not match
{ "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1773182491, 8), "signature" : { "hash" : BinData(0,"cw8eTvTVQh/Ytau9dzFiHM1bc1g="), "keyId" : NumberLong("7615760529511940120") } }, "operationTime" : Timestamp(1773182491, 5) }
bye
+ cat /tmp/tmp.33dZ5J1bu7
+ rm /tmp/tmp.nByDbvi97t /tmp/tmp.33dZ5J1bu7
+ return 0
+ insert_data_mongos 100500 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019
+ local data=100500
+ local db_name=myApp
+ local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local port=27019
+ run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@monitoring-mongos.monitoring-2-0-711 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019
+ local 'command=use myApp\n db.test.insert({ x: 100500 })'
+ local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-711
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local port=27019
+ local mongo_bin=mongo
++ echo .svc.cluster.local
++ awk -F: '{print $2}'
+ suffix_port=
+ [[ -z '' ]]
+ suffix=.svc.cluster.local:27019
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.c5J5XSyikT
+++ mktemp
++ local LAST_ERR=/tmp/tmp.WDKLIE4H33
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.c5J5XSyikT
++ cat /tmp/tmp.WDKLIE4H33
++ rm /tmp/tmp.c5J5XSyikT /tmp/tmp.WDKLIE4H33
++ return 0
+ local client_container=psmdb-client-699f458f75-9rrb2
+ kubectl_bin exec psmdb-client-699f458f75-9rrb2 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-711.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
++ mktemp
+ local LAST_OUT=/tmp/tmp.ykse59oTqo
++ mktemp
+ local LAST_ERR=/tmp/tmp.CxYi074h2G
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-699f458f75-9rrb2 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-711.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ykse59oTqo
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://monitoring-mongos.monitoring-2-0-711.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb
{"t":{"$date":"2026-03-10T22:41:33.637Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"}
Implicit session: session { "id" : UUID("fa4f0438-a83e-4276-af1e-ab06bb3d5b86") }
Percona Server for MongoDB server version: v8.0.19-7
WARNING: shell and server versions do not match
switched to db myApp
WriteResult({ "nInserted" : 1 })
bye
+ cat /tmp/tmp.CxYi074h2G
+ rm /tmp/tmp.ykse59oTqo /tmp/tmp.CxYi074h2G
+ return 0
+ insert_data_mongos 100600 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019
+ local data=100600
+ local db_name=myApp
+ local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local port=27019
+ run_mongos 'use myApp\n db.test.insert({ x: 100600 })' myApp:myPass@monitoring-mongos.monitoring-2-0-711 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019
+ local 'command=use myApp\n db.test.insert({ x: 100600 })'
+ local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-711
+ local driver=mongodb
+ local suffix=.svc.cluster.local
+ local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local port=27019
+ local mongo_bin=mongo
++ echo .svc.cluster.local
++ awk -F: '{print $2}'
+ suffix_port=
+ [[ -z '' ]]
+ suffix=.svc.cluster.local:27019
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.HKGsoQodEh
+++ mktemp
++ local LAST_ERR=/tmp/tmp.xC8OI2mTqN
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in $(seq 0 2)
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.HKGsoQodEh
++ cat /tmp/tmp.xC8OI2mTqN
++ rm /tmp/tmp.HKGsoQodEh /tmp/tmp.xC8OI2mTqN
++ return 0
+ local client_container=psmdb-client-699f458f75-9rrb2
+ kubectl_bin exec psmdb-client-699f458f75-9rrb2 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-711.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
++ mktemp
+ local LAST_OUT=/tmp/tmp.uySscNCWh3
++ mktemp
+ local LAST_ERR=/tmp/tmp.Vdzen7CBRl
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in $(seq 0 2)
+ set +e
+ kubectl exec psmdb-client-699f458f75-9rrb2 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-711.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.uySscNCWh3
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://monitoring-mongos.monitoring-2-0-711.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb
{"t":{"$date":"2026-03-10T22:41:36.013Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"}
"ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("a87563bf-083e-449f-b2c8-3e75b1551768") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.Vdzen7CBRl + rm /tmp/tmp.uySscNCWh3 /tmp/tmp.Vdzen7CBRl + return 0 + insert_data_mongos 100700 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local data=100700 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + run_mongos 'use myApp\n db.test.insert({ x: 100700 })' myApp:myPass@monitoring-mongos.monitoring-2-0-711 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' 27019 + local 'command=use myApp\n db.test.insert({ x: 100700 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-711 + local driver=mongodb + local suffix=.svc.cluster.local + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local port=27019 + local mongo_bin=mongo ++ echo .svc.cluster.local ++ awk -F: '{print $2}' + suffix_port= + [[ -z '' ]] + suffix=.svc.cluster.local:27019 ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2xvRyNNvWa +++ mktemp ++ local LAST_ERR=/tmp/tmp.ACJ2nE6W06 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.2xvRyNNvWa ++ cat /tmp/tmp.ACJ2nE6W06 ++ rm /tmp/tmp.2xvRyNNvWa /tmp/tmp.ACJ2nE6W06 ++ return 0 + local client_container=psmdb-client-699f458f75-9rrb2 + kubectl_bin exec psmdb-client-699f458f75-9rrb2 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-711.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.4SSkeNWb7v ++ mktemp + local LAST_ERR=/tmp/tmp.1aSLNOxuQq + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl exec psmdb-client-699f458f75-9rrb2 -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-711.svc.cluster.local:27019/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.4SSkeNWb7v Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-711.svc.cluster.local:27019/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2026-03-10T22:41:38.695Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("c674c59d-d62c-41c7-a59d-4f449593398d") } Percona Server for MongoDB server version: v8.0.19-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.1aSLNOxuQq + rm /tmp/tmp.4SSkeNWb7v /tmp/tmp.1aSLNOxuQq + return 0 + desc 'add PMM_SERVER_API_KEY for secret some-users' + set +o xtrace 
----------------------------------------------------------------------------------- add PMM_SERVER_API_KEY for secret some-users ----------------------------------------------------------------------------------- ++ jq .key +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.7maVxj02Ay +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.u80EtdSGWa ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.7maVxj02Ay ++++ cat /tmp/tmp.u80EtdSGWa ++++ rm /tmp/tmp.7maVxj02Ay /tmp/tmp.u80EtdSGWa ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ptcJdRxAHO +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.O25tbEFe8F ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.ptcJdRxAHO ++++ cat /tmp/tmp.O25tbEFe8F ++++ rm /tmp/tmp.ptcJdRxAHO /tmp/tmp.O25tbEFe8F ++++ return 0 +++ local ip=34.61.147.156 +++ '[' -n 34.61.147.156 -a 34.61.147.156 '!=' null ']' +++ echo 34.61.147.156 +++ return ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@34.61.147.156/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 155 100 119 100 36 257 78 --:--:-- --:--:-- --:--:-- 336 + API_KEY='"eyJrIjoiTFdERGU4RGg5MW0xOFRINFo2azV1U2hRVFpTMXo2M2ciLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="' + kubectl_bin patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoiTFdERGU4RGg5MW0xOFRINFo2azV1U2hRVFpTMXo2M2ciLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' ++ mktemp + local LAST_OUT=/tmp/tmp.UK8Ha4APkG ++ mktemp + local LAST_ERR=/tmp/tmp.1GwGEcs8kU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoiTFdERGU4RGg5MW0xOFRINFo2azV1U2hRVFpTMXo2M2ciLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UK8Ha4APkG secret/some-users patched + cat /tmp/tmp.1GwGEcs8kU + rm /tmp/tmp.UK8Ha4APkG /tmp/tmp.1GwGEcs8kU + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod 
monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready.OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jsNi1OZ0cF +++ mktemp ++ local LAST_ERR=/tmp/tmp.XOcYzvQgLQ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jsNi1OZ0cF ++ cat /tmp/tmp.XOcYzvQgLQ ++ rm /tmp/tmp.jsNi1OZ0cF /tmp/tmp.XOcYzvQgLQ ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready.OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4AzQgP6L9r +++ mktemp ++ local LAST_ERR=/tmp/tmp.snG99FYW5U ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4AzQgP6L9r ++ cat /tmp/tmp.snG99FYW5U ++ rm /tmp/tmp.4AzQgP6L9r /tmp/tmp.snG99FYW5U ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.iQvow6EbpV +++ mktemp ++ local LAST_ERR=/tmp/tmp.7NrU6yupW1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.iQvow6EbpV ++ cat /tmp/tmp.7NrU6yupW1 ++ rm /tmp/tmp.iQvow6EbpV /tmp/tmp.7NrU6yupW1 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.................................................................................................................................................... + sleep 90 + desc 'check if pmm-client container enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-rs0 + local resource=statefulset/monitoring-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml + local new_result=/tmp/tmp.jvfWgN3tn4/statefulset_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-711", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.M9ZySxkMgx ++ mktemp + local LAST_ERR=/tmp/tmp.klaCc53QgR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.M9ZySxkMgx + cat /tmp/tmp.klaCc53QgR + rm /tmp/tmp.M9ZySxkMgx /tmp/tmp.klaCc53QgR + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.jvfWgN3tn4/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.jvfWgN3tn4/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.jvfWgN3tn4/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml /tmp/tmp.jvfWgN3tn4/statefulset_monitoring-rs0.yml + log 'compare_kubectl: statefulset/monitoring-rs0 OK' + set +o xtrace [2026-03-10T22:48:28+0000] compare_kubectl: statefulset/monitoring-rs0 OK + compare_kubectl service/monitoring-rs0 + local resource=service/monitoring-rs0 + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml + local new_result=/tmp/tmp.jvfWgN3tn4/service_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-711", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.8jdRQg55wz ++ mktemp + local LAST_ERR=/tmp/tmp.ysdrzmREGE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8jdRQg55wz + cat /tmp/tmp.ysdrzmREGE + rm /tmp/tmp.8jdRQg55wz /tmp/tmp.ysdrzmREGE + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.jvfWgN3tn4/service_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.jvfWgN3tn4/service_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.jvfWgN3tn4/service_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml /tmp/tmp.jvfWgN3tn4/service_monitoring-rs0.yml + log 'compare_kubectl: service/monitoring-rs0 OK' + set +o xtrace [2026-03-10T22:48:29+0000] compare_kubectl: service/monitoring-rs0 OK + compare_kubectl service/monitoring-mongos + local resource=service/monitoring-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml + local new_result=/tmp/tmp.jvfWgN3tn4/service_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. 
| select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-711", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.MflVnJ1LjN ++ mktemp + local LAST_ERR=/tmp/tmp.Bj33jtUshZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml service/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MflVnJ1LjN + cat /tmp/tmp.Bj33jtUshZ + rm /tmp/tmp.MflVnJ1LjN /tmp/tmp.Bj33jtUshZ + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.jvfWgN3tn4/service_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.jvfWgN3tn4/service_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.jvfWgN3tn4/service_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml /tmp/tmp.jvfWgN3tn4/service_monitoring-mongos.yml + log 'compare_kubectl: service/monitoring-mongos OK' + set +o xtrace [2026-03-10T22:48:30+0000] compare_kubectl: service/monitoring-mongos OK + compare_kubectl statefulset/monitoring-cfg + local resource=statefulset/monitoring-cfg + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml + local new_result=/tmp/tmp.jvfWgN3tn4/statefulset_monitoring-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. 
| select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-711", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.Zp5ucW6grU ++ mktemp + local LAST_ERR=/tmp/tmp.LxY3a0xzoA + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Zp5ucW6grU + cat /tmp/tmp.LxY3a0xzoA + rm /tmp/tmp.Zp5ucW6grU /tmp/tmp.LxY3a0xzoA + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.jvfWgN3tn4/statefulset_monitoring-cfg.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.jvfWgN3tn4/statefulset_monitoring-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.jvfWgN3tn4/statefulset_monitoring-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml /tmp/tmp.jvfWgN3tn4/statefulset_monitoring-cfg.yml + log 'compare_kubectl: statefulset/monitoring-cfg OK' + set +o xtrace [2026-03-10T22:48:31+0000] compare_kubectl: statefulset/monitoring-cfg OK + compare_kubectl statefulset/monitoring-mongos + local resource=statefulset/monitoring-mongos + local postfix= + local skip_generation_check= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml + local new_result=/tmp/tmp.jvfWgN3tn4/statefulset_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. 
| select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-711", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.FeXLfOZvaf ++ mktemp + local LAST_ERR=/tmp/tmp.mGQEFlhUUw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get -o yaml statefulset/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FeXLfOZvaf + cat /tmp/tmp.mGQEFlhUUw + rm /tmp/tmp.FeXLfOZvaf /tmp/tmp.mGQEFlhUUw + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.jvfWgN3tn4/statefulset_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.32 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.jvfWgN3tn4/statefulset_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.jvfWgN3tn4/statefulset_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml == */cronjob* ]] + '[' -n '' ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml /tmp/tmp.jvfWgN3tn4/statefulset_monitoring-mongos.yml + log 'compare_kubectl: statefulset/monitoring-mongos OK' + set +o xtrace [2026-03-10T22:48:32+0000] compare_kubectl: statefulset/monitoring-mongos OK + desc 'check mongod metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongod metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-711-monitoring-rs0-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-711-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1773182852 ++ /usr/sbin/date -u +%s + local end=1773182912 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.m1HJumLkuJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.HaTo5lDIm7 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.m1HJumLkuJ +++ cat /tmp/tmp.HaTo5lDIm7 +++ rm /tmp/tmp.m1HJumLkuJ /tmp/tmp.HaTo5lDIm7 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YppV7Y7LUZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.t3Axh89Uss +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.YppV7Y7LUZ +++ cat /tmp/tmp.t3Axh89Uss +++ rm /tmp/tmp.YppV7Y7LUZ /tmp/tmp.t3Axh89Uss +++ return 0 ++ local ip=34.61.147.156 ++ '[' -n 34.61.147.156 -a 34.61.147.156 '!=' null ']' ++ echo 34.61.147.156 ++ return + local endpoint=34.61.147.156 + curl -s -k 
'https://admin:admin@34.61.147.156/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-711-monitoring-rs0-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-711-monitoring-rs0-1%22%7D%29&start=1773182852&end=1773182912&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "1773176640" "1773176640" + get_metric_values mongodb_connections monitoring-2-0-711-monitoring-rs0-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-711-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1773182854 ++ /usr/sbin/date -u +%s + local end=1773182914 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.YWUmY1rtid ++++ mktemp +++ local LAST_ERR=/tmp/tmp.EMaCRiWjnA +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.YWUmY1rtid +++ cat /tmp/tmp.EMaCRiWjnA +++ rm /tmp/tmp.YWUmY1rtid /tmp/tmp.EMaCRiWjnA +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jvPbwYHJoV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Z07ZInt5Dz +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.jvPbwYHJoV +++ cat /tmp/tmp.Z07ZInt5Dz +++ rm /tmp/tmp.jvPbwYHJoV /tmp/tmp.Z07ZInt5Dz +++ return 0 ++ local ip=34.61.147.156 ++ '[' -n 34.61.147.156 -a 34.61.147.156 '!=' null ']' ++ echo 34.61.147.156 ++ return + local endpoint=34.61.147.156 + curl -s -k 'https://admin:admin@34.61.147.156/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-711-monitoring-rs0-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-711-monitoring-rs0-1%22%7D%29&start=1773182854&end=1773182914&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "0" "0" + desc 'check mongo config metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongo config metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-711-monitoring-cfg-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-711-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1773182857 ++ /usr/sbin/date -u +%s + local end=1773182917 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.V8hYcoS1FA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.lrvTch8aX8 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set 
-e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.V8hYcoS1FA +++ cat /tmp/tmp.lrvTch8aX8 +++ rm /tmp/tmp.V8hYcoS1FA /tmp/tmp.lrvTch8aX8 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ZjkMN7HrB6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ui9w3Sskax +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ZjkMN7HrB6 +++ cat /tmp/tmp.ui9w3Sskax +++ rm /tmp/tmp.ZjkMN7HrB6 /tmp/tmp.ui9w3Sskax +++ return 0 ++ local ip=34.61.147.156 ++ '[' -n 34.61.147.156 -a 34.61.147.156 '!=' null ']' ++ echo 34.61.147.156 ++ return + local endpoint=34.61.147.156 + jq '.data.result[0].values[][1]' + curl -s -k 'https://admin:admin@34.61.147.156/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-711-monitoring-cfg-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-711-monitoring-cfg-1%22%7D%29&start=1773182857&end=1773182917&step=60' + grep '^"[0-9]' "1773176640" "1773176640" + get_metric_values mongodb_connections monitoring-2-0-711-monitoring-cfg-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-711-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1773182859 ++ /usr/sbin/date -u +%s + local end=1773182919 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rShIDmEui0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.POLEHmhAcD +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.rShIDmEui0 +++ cat /tmp/tmp.POLEHmhAcD +++ rm /tmp/tmp.rShIDmEui0 /tmp/tmp.POLEHmhAcD +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TNSQrDLQkv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VzeI6yelkU +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.TNSQrDLQkv +++ cat /tmp/tmp.VzeI6yelkU +++ rm /tmp/tmp.TNSQrDLQkv /tmp/tmp.VzeI6yelkU +++ return 0 ++ local ip=34.61.147.156 ++ '[' -n 34.61.147.156 -a 34.61.147.156 '!=' null ']' ++ echo 34.61.147.156 ++ return + local endpoint=34.61.147.156 + curl -s -k 'https://admin:admin@34.61.147.156/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-711-monitoring-cfg-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-711-monitoring-cfg-1%22%7D%29&start=1773182859&end=1773182919&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "0" "0" + desc 'check mongos metrics' + set +o xtrace 
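
Each metric probe above has the same shape: resolve the monitoring-service LoadBalancer address, then run a Prometheus query_range through PMM's Grafana datasource proxy over a one-minute window and keep only numeric sample values. A condensed sketch of that probe, assuming the admin:admin credentials used in this run; check_metric is an illustrative name, and the suite's get_metric_values additionally retries via kubectl_bin and falls back to the ingress hostname:

check_metric() {
    local metric="$1" instance="$2" creds="${3:-admin:admin}"
    local ep start end
    # LoadBalancer IP, as resolved by get_service_endpoint in the trace
    ep=$(kubectl get service/monitoring-service -o json \
        | jq -r '.status.loadBalancer.ingress[].ip')
    start=$(date -u +%s -d '-1 minute')
    end=$(date -u +%s)
    # -G with --data-urlencode builds the same pre-encoded query_range
    # URL seen in the log
    curl -s -k -G "https://${creds}@${ep}/graph/api/datasources/proxy/1/api/v1/query_range" \
        --data-urlencode "query=min(${metric}{node_name=~\"${instance}\"})" \
        --data "start=${start}" --data "end=${end}" --data "step=60" \
        | jq '.data.result[0].values[][1]' | grep '^"[0-9]'
}
# usage: check_metric node_boot_time_seconds monitoring-2-0-711-monitoring-rs0-1
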
----------------------------------------------------------------------------------- check mongos metrics ----------------------------------------------------------------------------------- ++ kubectl get pod -l app.kubernetes.io/component=mongos -o 'jsonpath={.items[0].metadata.name}' + MONGOS_POD_NAME=monitoring-mongos-0 + get_metric_values node_boot_time_seconds monitoring-2-0-711-monitoring-mongos-0 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-711-monitoring-mongos-0 + local user_pass=admin:admin ++ /usr/sbin/date -u +%s -d '-1 minute' + local start=1773182863 ++ /usr/sbin/date -u +%s + local end=1773182923 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.nCMp18lqc8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NVWbH8rYY6 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.nCMp18lqc8 +++ cat /tmp/tmp.NVWbH8rYY6 +++ rm /tmp/tmp.nCMp18lqc8 /tmp/tmp.NVWbH8rYY6 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6x69qyEAvm ++++ mktemp +++ local LAST_ERR=/tmp/tmp.4FHb8W8rej +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.6x69qyEAvm +++ cat /tmp/tmp.4FHb8W8rej +++ rm /tmp/tmp.6x69qyEAvm /tmp/tmp.4FHb8W8rej +++ return 0 ++ local ip=34.61.147.156 ++ '[' -n 34.61.147.156 -a 34.61.147.156 '!=' null ']' ++ echo 34.61.147.156 ++ return + local endpoint=34.61.147.156 + jq '.data.result[0].values[][1]' + grep '^"[0-9]' + curl -s -k 'https://admin:admin@34.61.147.156/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-711-monitoring-mongos-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-711-monitoring-mongos-0%22%7D%29&start=1773182863&end=1773182923&step=60' "1773180364" "1773180364" + sleep 90 + desc 'check QAN data' + set +o xtrace ----------------------------------------------------------------------------------- check QAN data ----------------------------------------------------------------------------------- + get_qan_values mongodb dev-mongod admin:admin + local service_type=mongodb + local environment=dev-mongod + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/sbin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2026-03-10T10:50:15+00:00 ++ /usr/sbin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2026-03-10T22:50:15+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0ZzZCbDwtM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.DRp81brfUW +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 
-a -n 1 ']' +++ break +++ cat /tmp/tmp.0ZzZCbDwtM +++ cat /tmp/tmp.DRp81brfUW +++ rm /tmp/tmp.0ZzZCbDwtM /tmp/tmp.DRp81brfUW +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8Bv2DDeCmb ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xWVewQqnk1 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.8Bv2DDeCmb +++ cat /tmp/tmp.xWVewQqnk1 +++ rm /tmp/tmp.8Bv2DDeCmb /tmp/tmp.xWVewQqnk1 +++ return 0 ++ local ip=34.61.147.156 ++ '[' -n 34.61.147.156 -a 34.61.147.156 '!=' null ']' ++ echo 34.61.147.156 ++ return + endpoint=34.61.147.156 + cat + local response + retry=0 ++ curl -s -k -XPOST -d @payload.json https://admin:admin@34.61.147.156/v0/qan/GetReport ++ jq '.rows[].sparkline' + [[ [ { "time_frame": 360, "timestamp": "2026-03-10T22:50:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-10T22:44:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-10T22:38:00Z" }, ... { "point": 118, "time_frame": 360, "timestamp": "2026-03-10T11:02:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-10T10:56:00Z" } ] [ { "time_frame": 360, "timestamp": "2026-03-10T22:50:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-10T22:44:00Z" }, ... { "point": 119, "time_frame": 360, "timestamp": "2026-03-10T10:56:00Z" } ] != \n\u\l\l ]] + rm -f payload.json + get_qan_values mongodb dev-mongos admin:admin + local service_type=mongodb + local environment=dev-mongos + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/sbin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2026-03-10T10:50:18+00:00 ++ /usr/sbin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2026-03-10T22:50:18+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pZ3Gz3JEQu ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MhzLnplCLy +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.pZ3Gz3JEQu +++ cat /tmp/tmp.MhzLnplCLy +++ rm /tmp/tmp.pZ3Gz3JEQu /tmp/tmp.MhzLnplCLy +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Nm16m5E8Fn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.P1h2qdkL2o +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Nm16m5E8Fn +++ cat /tmp/tmp.P1h2qdkL2o +++ rm /tmp/tmp.Nm16m5E8Fn /tmp/tmp.P1h2qdkL2o +++ return 0 ++ local ip=34.61.147.156 ++ '[' -n 34.61.147.156 -a 34.61.147.156 '!=' null ']' ++ echo 34.61.147.156 ++ return + endpoint=34.61.147.156 + cat + local response + retry=0 ++ curl -s -k -XPOST -d @payload.json https://admin:admin@34.61.147.156/v0/qan/GetReport ++ jq '.rows[].sparkline' + [[ [ { "time_frame": 360, "timestamp": "2026-03-10T22:50:00Z" }, { "point": 1, "time_frame": 360, "timestamp": "2026-03-10T22:44:00Z" }, { "point": 2, "time_frame": 360, "timestamp": "2026-03-10T22:38:00Z" }, { "point": 3, "time_frame": 360, "timestamp": "2026-03-10T22:32:00Z" }, { "point": 4, "time_frame": 360, "timestamp": "2026-03-10T22:26:00Z" }, { "point": 5, "time_frame": 360, "timestamp": "2026-03-10T22:20:00Z" }, { "point": 6, "time_frame": 360, "timestamp": "2026-03-10T22:14:00Z" }, { "point": 7, "time_frame": 360, "timestamp": "2026-03-10T22:08:00Z" }, { "point": 8, "time_frame": 360, "timestamp": "2026-03-10T22:02:00Z" }, { "point": 9, "time_frame": 360, "timestamp": "2026-03-10T21:56:00Z" }, { "point": 10, "time_frame": 360, "timestamp": "2026-03-10T21:50:00Z" },
{ "point": 11, "time_frame": 360, "timestamp": "2026-03-10T21:44:00Z" }, { "point": 12, "time_frame": 360, "timestamp": "2026-03-10T21:38:00Z" }, { "point": 13, "time_frame": 360, "timestamp": "2026-03-10T21:32:00Z" }, { "point": 14, "time_frame": 360, "timestamp": "2026-03-10T21:26:00Z" }, { "point": 15, "time_frame": 360, "timestamp": "2026-03-10T21:20:00Z" }, { "point": 16, "time_frame": 360, "timestamp": "2026-03-10T21:14:00Z" }, { "point": 17, "time_frame": 360, "timestamp": "2026-03-10T21:08:00Z" }, { "point": 18, "time_frame": 360, "timestamp": "2026-03-10T21:02:00Z" }, { "point": 19, "time_frame": 360, "timestamp": "2026-03-10T20:56:00Z" }, { "point": 20, "time_frame": 360, "timestamp": "2026-03-10T20:50:00Z" }, { "point": 21, "time_frame": 360, "timestamp": "2026-03-10T20:44:00Z" }, { "point": 22, "time_frame": 360, "timestamp": "2026-03-10T20:38:00Z" }, { "point": 23, "time_frame": 360, "timestamp": "2026-03-10T20:32:00Z" }, { "point": 24, "time_frame": 360, "timestamp": "2026-03-10T20:26:00Z" }, { "point": 25, "time_frame": 360, "timestamp": "2026-03-10T20:20:00Z" }, { "point": 26, "time_frame": 360, "timestamp": "2026-03-10T20:14:00Z" }, { "point": 27, "time_frame": 360, "timestamp": "2026-03-10T20:08:00Z" }, { "point": 28, "time_frame": 360, "timestamp": "2026-03-10T20:02:00Z" }, { "point": 29, "time_frame": 360, "timestamp": "2026-03-10T19:56:00Z" }, { "point": 30, "time_frame": 360, "timestamp": "2026-03-10T19:50:00Z" }, { "point": 31, "time_frame": 360, "timestamp": "2026-03-10T19:44:00Z" }, { "point": 32, "time_frame": 360, "timestamp": "2026-03-10T19:38:00Z" }, { "point": 33, "time_frame": 360, "timestamp": "2026-03-10T19:32:00Z" }, { "point": 34, "time_frame": 360, "timestamp": "2026-03-10T19:26:00Z" }, { "point": 35, "time_frame": 360, "timestamp": "2026-03-10T19:20:00Z" }, { "point": 36, "time_frame": 360, "timestamp": "2026-03-10T19:14:00Z" }, { "point": 37, "time_frame": 360, "timestamp": "2026-03-10T19:08:00Z" }, { "point": 38, "time_frame": 360, "timestamp": "2026-03-10T19:02:00Z" }, { "point": 39, "time_frame": 360, "timestamp": "2026-03-10T18:56:00Z" }, { "point": 40, "time_frame": 360, "timestamp": "2026-03-10T18:50:00Z" }, { "point": 41, "time_frame": 360, "timestamp": "2026-03-10T18:44:00Z" }, { "point": 42, "time_frame": 360, "timestamp": "2026-03-10T18:38:00Z" }, { "point": 43, "time_frame": 360, "timestamp": "2026-03-10T18:32:00Z" }, { "point": 44, "time_frame": 360, "timestamp": "2026-03-10T18:26:00Z" }, { "point": 45, "time_frame": 360, "timestamp": "2026-03-10T18:20:00Z" }, { "point": 46, "time_frame": 360, "timestamp": "2026-03-10T18:14:00Z" }, { "point": 47, "time_frame": 360, "timestamp": "2026-03-10T18:08:00Z" }, { "point": 48, "time_frame": 360, "timestamp": "2026-03-10T18:02:00Z" }, { "point": 49, "time_frame": 360, "timestamp": "2026-03-10T17:56:00Z" }, { "point": 50, "time_frame": 360, "timestamp": "2026-03-10T17:50:00Z" }, { "point": 51, "time_frame": 360, "timestamp": "2026-03-10T17:44:00Z" }, { "point": 52, "time_frame": 360, "timestamp": "2026-03-10T17:38:00Z" }, { "point": 53, "time_frame": 360, "timestamp": "2026-03-10T17:32:00Z" }, { "point": 54, "time_frame": 360, "timestamp": "2026-03-10T17:26:00Z" }, { "point": 55, "time_frame": 360, "timestamp": "2026-03-10T17:20:00Z" }, { "point": 56, "time_frame": 360, "timestamp": "2026-03-10T17:14:00Z" }, { "point": 57, "time_frame": 360, "timestamp": "2026-03-10T17:08:00Z" }, { "point": 58, "time_frame": 360, "timestamp": "2026-03-10T17:02:00Z" }, { "point": 59, "time_frame": 360, "timestamp": 
"2026-03-10T16:56:00Z" }, { "point": 60, "time_frame": 360, "timestamp": "2026-03-10T16:50:00Z" }, { "point": 61, "time_frame": 360, "timestamp": "2026-03-10T16:44:00Z" }, { "point": 62, "time_frame": 360, "timestamp": "2026-03-10T16:38:00Z" }, { "point": 63, "time_frame": 360, "timestamp": "2026-03-10T16:32:00Z" }, { "point": 64, "time_frame": 360, "timestamp": "2026-03-10T16:26:00Z" }, { "point": 65, "time_frame": 360, "timestamp": "2026-03-10T16:20:00Z" }, { "point": 66, "time_frame": 360, "timestamp": "2026-03-10T16:14:00Z" }, { "point": 67, "time_frame": 360, "timestamp": "2026-03-10T16:08:00Z" }, { "point": 68, "time_frame": 360, "timestamp": "2026-03-10T16:02:00Z" }, { "point": 69, "time_frame": 360, "timestamp": "2026-03-10T15:56:00Z" }, { "point": 70, "time_frame": 360, "timestamp": "2026-03-10T15:50:00Z" }, { "point": 71, "time_frame": 360, "timestamp": "2026-03-10T15:44:00Z" }, { "point": 72, "time_frame": 360, "timestamp": "2026-03-10T15:38:00Z" }, { "point": 73, "time_frame": 360, "timestamp": "2026-03-10T15:32:00Z" }, { "point": 74, "time_frame": 360, "timestamp": "2026-03-10T15:26:00Z" }, { "point": 75, "time_frame": 360, "timestamp": "2026-03-10T15:20:00Z" }, { "point": 76, "time_frame": 360, "timestamp": "2026-03-10T15:14:00Z" }, { "point": 77, "time_frame": 360, "timestamp": "2026-03-10T15:08:00Z" }, { "point": 78, "time_frame": 360, "timestamp": "2026-03-10T15:02:00Z" }, { "point": 79, "time_frame": 360, "timestamp": "2026-03-10T14:56:00Z" }, { "point": 80, "time_frame": 360, "timestamp": "2026-03-10T14:50:00Z" }, { "point": 81, "time_frame": 360, "timestamp": "2026-03-10T14:44:00Z" }, { "point": 82, "time_frame": 360, "timestamp": "2026-03-10T14:38:00Z" }, { "point": 83, "time_frame": 360, "timestamp": "2026-03-10T14:32:00Z" }, { "point": 84, "time_frame": 360, "timestamp": "2026-03-10T14:26:00Z" }, { "point": 85, "time_frame": 360, "timestamp": "2026-03-10T14:20:00Z" }, { "point": 86, "time_frame": 360, "timestamp": "2026-03-10T14:14:00Z" }, { "point": 87, "time_frame": 360, "timestamp": "2026-03-10T14:08:00Z" }, { "point": 88, "time_frame": 360, "timestamp": "2026-03-10T14:02:00Z" }, { "point": 89, "time_frame": 360, "timestamp": "2026-03-10T13:56:00Z" }, { "point": 90, "time_frame": 360, "timestamp": "2026-03-10T13:50:00Z" }, { "point": 91, "time_frame": 360, "timestamp": "2026-03-10T13:44:00Z" }, { "point": 92, "time_frame": 360, "timestamp": "2026-03-10T13:38:00Z" }, { "point": 93, "time_frame": 360, "timestamp": "2026-03-10T13:32:00Z" }, { "point": 94, "time_frame": 360, "timestamp": "2026-03-10T13:26:00Z" }, { "point": 95, "time_frame": 360, "timestamp": "2026-03-10T13:20:00Z" }, { "point": 96, "time_frame": 360, "timestamp": "2026-03-10T13:14:00Z" }, { "point": 97, "time_frame": 360, "timestamp": "2026-03-10T13:08:00Z" }, { "point": 98, "time_frame": 360, "timestamp": "2026-03-10T13:02:00Z" }, { "point": 99, "time_frame": 360, "timestamp": "2026-03-10T12:56:00Z" }, { "point": 100, "time_frame": 360, "timestamp": "2026-03-10T12:50:00Z" }, { "point": 101, "time_frame": 360, "timestamp": "2026-03-10T12:44:00Z" }, { "point": 102, "time_frame": 360, "timestamp": "2026-03-10T12:38:00Z" }, { "point": 103, "time_frame": 360, "timestamp": "2026-03-10T12:32:00Z" }, { "point": 104, "time_frame": 360, "timestamp": "2026-03-10T12:26:00Z" }, { "point": 105, "time_frame": 360, "timestamp": "2026-03-10T12:20:00Z" }, { "point": 106, "time_frame": 360, "timestamp": "2026-03-10T12:14:00Z" }, { "point": 107, "time_frame": 360, "timestamp": "2026-03-10T12:08:00Z" }, { "point": 108, 
"time_frame": 360, "timestamp": "2026-03-10T12:02:00Z" }, { "point": 109, "time_frame": 360, "timestamp": "2026-03-10T11:56:00Z" }, { "point": 110, "time_frame": 360, "timestamp": "2026-03-10T11:50:00Z" }, { "point": 111, "time_frame": 360, "timestamp": "2026-03-10T11:44:00Z" }, { "point": 112, "time_frame": 360, "timestamp": "2026-03-10T11:38:00Z" }, { "point": 113, "time_frame": 360, "timestamp": "2026-03-10T11:32:00Z" }, { "point": 114, "time_frame": 360, "timestamp": "2026-03-10T11:26:00Z" }, { "point": 115, "time_frame": 360, "timestamp": "2026-03-10T11:20:00Z" }, { "point": 116, "time_frame": 360, "timestamp": "2026-03-10T11:14:00Z" }, { "point": 117, "time_frame": 360, "timestamp": "2026-03-10T11:08:00Z" }, { "point": 118, "time_frame": 360, "timestamp": "2026-03-10T11:02:00Z" }, { "point": 119, "time_frame": 360, "timestamp": "2026-03-10T10:56:00Z" } ] != \n\u\l\l ]] + rm -f payload.json + nodeList=($(get_node_id_from_pmm)) ++ get_node_id_from_pmm ++ nodeList=() ++ local -a nodeList +++ kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name ++++ mktemp +++ local LAST_OUT=/tmp/tmp.B7SpuMuBp3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JPkHh9YAQH +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.B7SpuMuBp3 +++ cat /tmp/tmp.JPkHh9YAQH +++ rm /tmp/tmp.B7SpuMuBp3 /tmp/tmp.JPkHh9YAQH +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ZHaHwEKoSK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.aKjcjoJZnH +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ZHaHwEKoSK +++ cat /tmp/tmp.aKjcjoJZnH +++ rm /tmp/tmp.ZHaHwEKoSK /tmp/tmp.aKjcjoJZnH +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-cfg-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.aHVRV8VQT0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ut0QplqaN2 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-cfg-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.aHVRV8VQT0 +++ cat /tmp/tmp.ut0QplqaN2 +++ rm /tmp/tmp.aHVRV8VQT0 /tmp/tmp.ut0QplqaN2 +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb 
--output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-cfg-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.oUKsjCTg6q ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GORIBq5Mrl +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-cfg-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.oUKsjCTg6q +++ cat /tmp/tmp.GORIBq5Mrl +++ rm /tmp/tmp.oUKsjCTg6q /tmp/tmp.GORIBq5Mrl +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-mongos-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jlZgleEygN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.jGTqG5ngpi +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-mongos-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.jlZgleEygN +++ cat /tmp/tmp.jGTqG5ngpi +++ rm /tmp/tmp.jlZgleEygN /tmp/tmp.jGTqG5ngpi +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-mongos-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.vOMSp2UXg4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ys7X6DEcB8 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-mongos-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.vOMSp2UXg4 +++ cat /tmp/tmp.ys7X6DEcB8 +++ rm /tmp/tmp.vOMSp2UXg4 /tmp/tmp.ys7X6DEcB8 +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-mongos-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.dg52mS8hOR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.RnumeRHpzu +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-mongos-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.dg52mS8hOR +++ cat /tmp/tmp.RnumeRHpzu +++ rm /tmp/tmp.dg52mS8hOR /tmp/tmp.RnumeRHpzu +++ return 0 ++ for instance in $(kubectl_bin get 
pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-rs0-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.OK3cEZrNOt ++++ mktemp +++ local LAST_ERR=/tmp/tmp.HyEioBMSiZ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-rs0-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.OK3cEZrNOt +++ cat /tmp/tmp.HyEioBMSiZ +++ rm /tmp/tmp.OK3cEZrNOt /tmp/tmp.HyEioBMSiZ +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-rs0-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.FdtGprmB6n ++++ mktemp +++ local LAST_ERR=/tmp/tmp.XoD0ZgVkyh +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-rs0-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.FdtGprmB6n +++ cat /tmp/tmp.XoD0ZgVkyh +++ rm /tmp/tmp.FdtGprmB6n /tmp/tmp.XoD0ZgVkyh +++ return 0 ++ for instance in $(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='NAME:.metadata.name') ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-rs0-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.EyEozJ0xTG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.N1CuQJW6Yf +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-rs0-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.EyEozJ0xTG +++ cat /tmp/tmp.N1CuQJW6Yf +++ rm /tmp/tmp.EyEozJ0xTG /tmp/tmp.N1CuQJW6Yf +++ return 0 ++ echo /node_id/5ae64577-8993-4a6d-8a82-302399b474ce /node_id/6244bb6c-42bf-493d-875d-6661a9aedc9b /node_id/822d6ec8-0190-419b-beb2-6bdf463e7bab /node_id/9d2b6e1c-7daf-4085-85b1-96cf0ab36a26 /node_id/8fb74260-ade6-4aeb-b007-9329b5894b52 /node_id/42d5d4f0-9299-4b59-8b1c-4702ac768ea1 /node_id/4847fc78-fec9-46be-b1bf-a49a32fe3748 /node_id/043c5f6e-49f6-43a0-9b13-726ffc9c07f3 /node_id/3c870ea2-fe66-4efc-85d2-6489967925e7 + nodeList_from_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/5ae64577-8993-4a6d-8a82-302399b474ce /node_id/6244bb6c-42bf-493d-875d-6661a9aedc9b /node_id/822d6ec8-0190-419b-beb2-6bdf463e7bab /node_id/9d2b6e1c-7daf-4085-85b1-96cf0ab36a26 /node_id/8fb74260-ade6-4aeb-b007-9329b5894b52 /node_id/42d5d4f0-9299-4b59-8b1c-4702ac768ea1 /node_id/4847fc78-fec9-46be-b1bf-a49a32fe3748 /node_id/043c5f6e-49f6-43a0-9b13-726ffc9c07f3 
/node_id/3c870ea2-fe66-4efc-85d2-6489967925e7 ++ nodeList=('/node_id/5ae64577-8993-4a6d-8a82-302399b474ce' '/node_id/6244bb6c-42bf-493d-875d-6661a9aedc9b' '/node_id/822d6ec8-0190-419b-beb2-6bdf463e7bab' '/node_id/9d2b6e1c-7daf-4085-85b1-96cf0ab36a26' '/node_id/8fb74260-ade6-4aeb-b007-9329b5894b52' '/node_id/42d5d4f0-9299-4b59-8b1c-4702ac768ea1' '/node_id/4847fc78-fec9-46be-b1bf-a49a32fe3748' '/node_id/043c5f6e-49f6-43a0-9b13-726ffc9c07f3' '/node_id/3c870ea2-fe66-4efc-85d2-6489967925e7') ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/5ae64577-8993-4a6d-8a82-302399b474ce +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.v4rBErJRzp +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.g1qxOOkvIC ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.v4rBErJRzp ++++ cat /tmp/tmp.g1qxOOkvIC ++++ rm /tmp/tmp.v4rBErJRzp /tmp/tmp.g1qxOOkvIC ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.yJO15y9icC +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.tofitjxfvc ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.yJO15y9icC ++++ cat /tmp/tmp.tofitjxfvc ++++ rm /tmp/tmp.yJO15y9icC /tmp/tmp.tofitjxfvc ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ShzgHBZwTM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.kuGkDiu9Vd +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ShzgHBZwTM +++ cat /tmp/tmp.kuGkDiu9Vd +++ rm /tmp/tmp.ShzgHBZwTM /tmp/tmp.kuGkDiu9Vd +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/6244bb6c-42bf-493d-875d-6661a9aedc9b +++ awk 
'{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.woLesdBi53 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.731FHcaMsV ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.woLesdBi53 ++++ cat /tmp/tmp.731FHcaMsV ++++ rm /tmp/tmp.woLesdBi53 /tmp/tmp.731FHcaMsV ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.J04aHG2sBs +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.WqPGisjEX9 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.J04aHG2sBs ++++ cat /tmp/tmp.WqPGisjEX9 ++++ rm /tmp/tmp.J04aHG2sBs /tmp/tmp.WqPGisjEX9 ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.T4YWwaDw0N ++++ mktemp +++ local LAST_ERR=/tmp/tmp.r3WtyvwXwy +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.T4YWwaDw0N +++ cat /tmp/tmp.r3WtyvwXwy +++ rm /tmp/tmp.T4YWwaDw0N /tmp/tmp.r3WtyvwXwy +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/822d6ec8-0190-419b-beb2-6bdf463e7bab ++++ get_pmm_service_ip monitoring-service +++ awk '{print $4}' ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.O1iRdztUn6 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.yv3AA9k49y ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.O1iRdztUn6 ++++ cat /tmp/tmp.yv3AA9k49y ++++ rm /tmp/tmp.O1iRdztUn6 /tmp/tmp.yv3AA9k49y ++++ return 0 ++++ kubectl_bin get 
service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.SK4Os01Z1B +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.1D9PMMBaJW ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.SK4Os01Z1B ++++ cat /tmp/tmp.1D9PMMBaJW ++++ rm /tmp/tmp.SK4Os01Z1B /tmp/tmp.1D9PMMBaJW ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.MAKwupqJ5T ++++ mktemp +++ local LAST_ERR=/tmp/tmp.oshgcZ6oSj +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.MAKwupqJ5T +++ cat /tmp/tmp.oshgcZ6oSj +++ rm /tmp/tmp.MAKwupqJ5T /tmp/tmp.oshgcZ6oSj +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/9d2b6e1c-7daf-4085-85b1-96cf0ab36a26 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Ingb0wLauH +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.86YduZfOfH ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.Ingb0wLauH ++++ cat /tmp/tmp.86YduZfOfH ++++ rm /tmp/tmp.Ingb0wLauH /tmp/tmp.86YduZfOfH ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.eiqLkcYexE +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.uK4AHljIKM ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.eiqLkcYexE ++++ cat /tmp/tmp.uK4AHljIKM ++++ rm /tmp/tmp.eiqLkcYexE /tmp/tmp.uK4AHljIKM ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RNlmt5xfQd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.DBolyhylVs +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in 
$(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.RNlmt5xfQd +++ cat /tmp/tmp.DBolyhylVs +++ rm /tmp/tmp.RNlmt5xfQd /tmp/tmp.DBolyhylVs +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/8fb74260-ade6-4aeb-b007-9329b5894b52 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.sYfX6DCKTt +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.6ZFzs4rYN4 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.sYfX6DCKTt ++++ cat /tmp/tmp.6ZFzs4rYN4 ++++ rm /tmp/tmp.sYfX6DCKTt /tmp/tmp.6ZFzs4rYN4 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.rfXBNLAioS +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.SZKcghdffs ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.rfXBNLAioS ++++ cat /tmp/tmp.SZKcghdffs ++++ rm /tmp/tmp.rfXBNLAioS /tmp/tmp.SZKcghdffs ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.WIAwLSnX38 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.iQt3YuSRlC +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.WIAwLSnX38 +++ cat /tmp/tmp.iQt3YuSRlC +++ rm /tmp/tmp.WIAwLSnX38 /tmp/tmp.iQt3YuSRlC +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/42d5d4f0-9299-4b59-8b1c-4702ac768ea1 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q 
NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.sOzWUnnEDe +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.SDtg6g7hhm ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.sOzWUnnEDe ++++ cat /tmp/tmp.SDtg6g7hhm ++++ rm /tmp/tmp.sOzWUnnEDe /tmp/tmp.SDtg6g7hhm ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.UcHqyInuf9 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.gidwqHWo61 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.UcHqyInuf9 ++++ cat /tmp/tmp.gidwqHWo61 ++++ rm /tmp/tmp.UcHqyInuf9 /tmp/tmp.gidwqHWo61 ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2IuEOUIifi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.j579PJOV6k +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.2IuEOUIifi +++ cat /tmp/tmp.j579PJOV6k +++ rm /tmp/tmp.2IuEOUIifi /tmp/tmp.j579PJOV6k +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/4847fc78-fec9-46be-b1bf-a49a32fe3748 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.aXs1mi6Xcc +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.m8cS32QDxu ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.aXs1mi6Xcc ++++ cat /tmp/tmp.m8cS32QDxu ++++ rm /tmp/tmp.aXs1mi6Xcc /tmp/tmp.m8cS32QDxu ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.oFv9f0lxAO +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.pLm0ygNUgf ++++ 
local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.oFv9f0lxAO ++++ cat /tmp/tmp.pLm0ygNUgf ++++ rm /tmp/tmp.oFv9f0lxAO /tmp/tmp.pLm0ygNUgf ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zfwlFhZelF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.SxOi6L0Mtg +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.zfwlFhZelF +++ cat /tmp/tmp.SxOi6L0Mtg +++ rm /tmp/tmp.zfwlFhZelF /tmp/tmp.SxOi6L0Mtg +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/043c5f6e-49f6-43a0-9b13-726ffc9c07f3 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Y0Ac6tKgHO +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.NBeMcR9pN2 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.Y0Ac6tKgHO ++++ cat /tmp/tmp.NBeMcR9pN2 ++++ rm /tmp/tmp.Y0Ac6tKgHO /tmp/tmp.NBeMcR9pN2 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ll8LxfjuLf +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.EIi3rqKZg9 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.ll8LxfjuLf ++++ cat /tmp/tmp.EIi3rqKZg9 ++++ rm /tmp/tmp.ll8LxfjuLf /tmp/tmp.EIi3rqKZg9 ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.bGBWTS9h0Z ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vXzudANnhx +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes 
--node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.bGBWTS9h0Z +++ cat /tmp/tmp.vXzudANnhx +++ rm /tmp/tmp.bGBWTS9h0Z /tmp/tmp.vXzudANnhx +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/3c870ea2-fe66-4efc-85d2-6489967925e7 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.vE2kFJ8BbD +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.v0nDHYM6uw ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.vE2kFJ8BbD ++++ cat /tmp/tmp.v0nDHYM6uw ++++ rm /tmp/tmp.vE2kFJ8BbD /tmp/tmp.v0nDHYM6uw ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.8E5OdXIGEP +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.amKEXmSses ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.8E5OdXIGEP ++++ cat /tmp/tmp.amKEXmSses ++++ rm /tmp/tmp.8E5OdXIGEP /tmp/tmp.amKEXmSses ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.aIAGo5s2W0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Fv4Anaxd8p +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.aIAGo5s2W0 +++ cat /tmp/tmp.Fv4Anaxd8p +++ rm /tmp/tmp.aIAGo5s2W0 /tmp/tmp.Fv4Anaxd8p +++ return 0 ++ echo /node_id/5ae64577-8993-4a6d-8a82-302399b474ce /node_id/6244bb6c-42bf-493d-875d-6661a9aedc9b /node_id/822d6ec8-0190-419b-beb2-6bdf463e7bab /node_id/9d2b6e1c-7daf-4085-85b1-96cf0ab36a26 /node_id/8fb74260-ade6-4aeb-b007-9329b5894b52 /node_id/42d5d4f0-9299-4b59-8b1c-4702ac768ea1 /node_id/4847fc78-fec9-46be-b1bf-a49a32fe3748 /node_id/043c5f6e-49f6-43a0-9b13-726ffc9c07f3 /node_id/3c870ea2-fe66-4efc-85d2-6489967925e7 + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/5ae64577-8993-4a6d-8a82-302399b474ce ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/6244bb6c-42bf-493d-875d-6661a9aedc9b ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/822d6ec8-0190-419b-beb2-6bdf463e7bab ']' 
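At this point the test has collected a pmm-agent node_id from every cluster pod and confirmed each one appears in the PMM server's inventory. The trace that follows pauses the cluster and checks that the operator deletes the pods while leaving the services in place. Distilled from the xtrace output, the flow is roughly the sketch below; kubectl_bin, wait_for_delete, and the psmdb resource name monitoring are taken from this log, while the failure handling is an assumption, since the trace only shows the checks passing.

# Minimal sketch of the pause-and-verify flow, assuming the suite's
# helpers behave as seen in the trace: kubectl_bin retries kubectl up
# to three times, wait_for_delete polls until the resource is gone.
for node_id in "${nodeList_from_pmm[@]}"; do
    # every pod's node_id must have been found in PMM's inventory
    if [ -z "$node_id" ]; then
        exit 1   # failure action assumed; the trace only shows passing checks
    fi
done
# Pause the cluster; the operator should remove pods but keep services.
kubectl_bin patch psmdb monitoring --type json \
    '-p=[{"op":"add","path":"/spec/pause","value":true}]'
wait_for_delete pod/monitoring-mongos-0
wait_for_delete pod/monitoring-rs0-0
wait_for_delete pod/monitoring-cfg-0
# The ClusterIP/headless services must survive the pause.
kubectl_bin get svc monitoring-rs0
kubectl_bin get svc monitoring-cfg
kubectl_bin get svc monitoring-mongos

The remainder of the trace resumes below with the per-node -z checks, the patch, the pod-deletion waits, and the service checks, then re-runs does_node_id_exists against the paused cluster.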
+ for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/9d2b6e1c-7daf-4085-85b1-96cf0ab36a26 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/8fb74260-ade6-4aeb-b007-9329b5894b52 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/42d5d4f0-9299-4b59-8b1c-4702ac768ea1 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/4847fc78-fec9-46be-b1bf-a49a32fe3748 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/043c5f6e-49f6-43a0-9b13-726ffc9c07f3 ']' + for node_id in "${nodeList_from_pmm[@]}" + '[' -z /node_id/3c870ea2-fe66-4efc-85d2-6489967925e7 ']' + kubectl_bin patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.X8WFUBTPgz ++ mktemp + local LAST_ERR=/tmp/tmp.GBLAwlRTMJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.X8WFUBTPgz perconaservermongodb.psmdb.percona.com/monitoring patched + cat /tmp/tmp.GBLAwlRTMJ + rm /tmp/tmp.X8WFUBTPgz /tmp/tmp.GBLAwlRTMJ + return 0 + wait_for_delete pod/monitoring-mongos-0 + local res=pod/monitoring-mongos-0 + local wait_time=60 + set +o xtrace waiting for pod/monitoring-mongos-0 to be deleted........................Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found + wait_for_delete pod/monitoring-rs0-0 + local res=pod/monitoring-rs0-0 + local wait_time=60 + set +o xtrace waiting for pod/monitoring-rs0-0 to be deleted...........Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found + wait_for_delete pod/monitoring-cfg-0 + local res=pod/monitoring-cfg-0 + local wait_time=60 + set +o xtrace waiting for pod/monitoring-cfg-0 to be deleted...........Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found + desc 'check if services are not deleted' + set +o xtrace ----------------------------------------------------------------------------------- check if services are not deleted ----------------------------------------------------------------------------------- + kubectl_bin get svc monitoring-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.NAKK73cdpe ++ mktemp + local LAST_ERR=/tmp/tmp.SeHmhDOkZV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get svc monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NAKK73cdpe NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-rs0 ClusterIP None 27019/TCP 14m + cat /tmp/tmp.SeHmhDOkZV + rm /tmp/tmp.NAKK73cdpe /tmp/tmp.SeHmhDOkZV + return 0 + kubectl_bin get svc monitoring-cfg ++ mktemp + local LAST_OUT=/tmp/tmp.glMoyZtlmy ++ mktemp + local LAST_ERR=/tmp/tmp.0K5ZxD0RuB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get svc monitoring-cfg + exit_status=0 + set -e + 
'[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.glMoyZtlmy NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-cfg ClusterIP None 27019/TCP 14m + cat /tmp/tmp.0K5ZxD0RuB + rm /tmp/tmp.glMoyZtlmy /tmp/tmp.0K5ZxD0RuB + return 0 + kubectl_bin get svc monitoring-mongos ++ mktemp + local LAST_OUT=/tmp/tmp.xcuKa5e7l3 ++ mktemp + local LAST_ERR=/tmp/tmp.z5HCFpBDvZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl get svc monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xcuKa5e7l3 NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-mongos ClusterIP 34.118.239.58 27019/TCP 14m + cat /tmp/tmp.z5HCFpBDvZ + rm /tmp/tmp.xcuKa5e7l3 /tmp/tmp.z5HCFpBDvZ + return 0 + does_node_id_exists_in_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/5ae64577-8993-4a6d-8a82-302399b474ce /node_id/6244bb6c-42bf-493d-875d-6661a9aedc9b /node_id/822d6ec8-0190-419b-beb2-6bdf463e7bab /node_id/9d2b6e1c-7daf-4085-85b1-96cf0ab36a26 /node_id/8fb74260-ade6-4aeb-b007-9329b5894b52 /node_id/42d5d4f0-9299-4b59-8b1c-4702ac768ea1 /node_id/4847fc78-fec9-46be-b1bf-a49a32fe3748 /node_id/043c5f6e-49f6-43a0-9b13-726ffc9c07f3 /node_id/3c870ea2-fe66-4efc-85d2-6489967925e7 ++ nodeList=('/node_id/5ae64577-8993-4a6d-8a82-302399b474ce' '/node_id/6244bb6c-42bf-493d-875d-6661a9aedc9b' '/node_id/822d6ec8-0190-419b-beb2-6bdf463e7bab' '/node_id/9d2b6e1c-7daf-4085-85b1-96cf0ab36a26' '/node_id/8fb74260-ade6-4aeb-b007-9329b5894b52' '/node_id/42d5d4f0-9299-4b59-8b1c-4702ac768ea1' '/node_id/4847fc78-fec9-46be-b1bf-a49a32fe3748' '/node_id/043c5f6e-49f6-43a0-9b13-726ffc9c07f3' '/node_id/3c870ea2-fe66-4efc-85d2-6489967925e7') ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/5ae64577-8993-4a6d-8a82-302399b474ce +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.OMReNNEGuT +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.ioAc0N5XGu ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.OMReNNEGuT ++++ cat /tmp/tmp.ioAc0N5XGu ++++ rm /tmp/tmp.OMReNNEGuT /tmp/tmp.ioAc0N5XGu ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.hRe938uVLD +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.eodYhpH4gj ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat 
/tmp/tmp.hRe938uVLD ++++ cat /tmp/tmp.eodYhpH4gj ++++ rm /tmp/tmp.hRe938uVLD /tmp/tmp.eodYhpH4gj ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.aBbntyO6g0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.YkOSXBWfkx +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.aBbntyO6g0 +++ cat /tmp/tmp.YkOSXBWfkx +++ rm /tmp/tmp.aBbntyO6g0 /tmp/tmp.YkOSXBWfkx +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/6244bb6c-42bf-493d-875d-6661a9aedc9b +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.NYCHxW82VU +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.tdMgzRoYOV ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.NYCHxW82VU ++++ cat /tmp/tmp.tdMgzRoYOV ++++ rm /tmp/tmp.NYCHxW82VU /tmp/tmp.tdMgzRoYOV ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.zn0tAm7xCm +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.H5zNKFsjXN ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.zn0tAm7xCm ++++ cat /tmp/tmp.H5zNKFsjXN ++++ rm /tmp/tmp.zn0tAm7xCm /tmp/tmp.H5zNKFsjXN ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.E995Z97aZs ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yB2MOPjPjG +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.E995Z97aZs +++ cat /tmp/tmp.yB2MOPjPjG +++ rm /tmp/tmp.E995Z97aZs /tmp/tmp.yB2MOPjPjG +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n 
"${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/822d6ec8-0190-419b-beb2-6bdf463e7bab +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.YOBc1F2a4M +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.dyQgauXZLZ ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.YOBc1F2a4M ++++ cat /tmp/tmp.dyQgauXZLZ ++++ rm /tmp/tmp.YOBc1F2a4M /tmp/tmp.dyQgauXZLZ ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.kloiVAsaqV +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.y8EczRVHmE ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.kloiVAsaqV ++++ cat /tmp/tmp.y8EczRVHmE ++++ rm /tmp/tmp.kloiVAsaqV /tmp/tmp.y8EczRVHmE ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9dteTnUSg8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.IjgHQwd3ug +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.9dteTnUSg8 +++ cat /tmp/tmp.IjgHQwd3ug +++ rm /tmp/tmp.9dteTnUSg8 /tmp/tmp.IjgHQwd3ug +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/9d2b6e1c-7daf-4085-85b1-96cf0ab36a26 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.k4B58dnlQM +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.abbRkKRAnT ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get 
service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.k4B58dnlQM ++++ cat /tmp/tmp.abbRkKRAnT ++++ rm /tmp/tmp.k4B58dnlQM /tmp/tmp.abbRkKRAnT ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.muiUOyTbIE +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.3TwIOX7mtd ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.muiUOyTbIE ++++ cat /tmp/tmp.3TwIOX7mtd ++++ rm /tmp/tmp.muiUOyTbIE /tmp/tmp.3TwIOX7mtd ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.hvXiqueOcC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CCrj2frAM1 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.hvXiqueOcC +++ cat /tmp/tmp.CCrj2frAM1 +++ rm /tmp/tmp.hvXiqueOcC /tmp/tmp.CCrj2frAM1 +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/8fb74260-ade6-4aeb-b007-9329b5894b52 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.hWzHcj0Ztu +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.kbOoGMW50Q ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.hWzHcj0Ztu ++++ cat /tmp/tmp.kbOoGMW50Q ++++ rm /tmp/tmp.hWzHcj0Ztu /tmp/tmp.kbOoGMW50Q ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.9usgUCncKn +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.evfUhO657g ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.9usgUCncKn ++++ cat /tmp/tmp.evfUhO657g ++++ rm /tmp/tmp.9usgUCncKn /tmp/tmp.evfUhO657g ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin 
--server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.EmUO25orQn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.puLtqStT4x +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.EmUO25orQn +++ cat /tmp/tmp.puLtqStT4x +++ rm /tmp/tmp.EmUO25orQn /tmp/tmp.puLtqStT4x +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/42d5d4f0-9299-4b59-8b1c-4702ac768ea1 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.gCC4aWdf4t +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Kkk7WtGw2G ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.gCC4aWdf4t ++++ cat /tmp/tmp.Kkk7WtGw2G ++++ rm /tmp/tmp.gCC4aWdf4t /tmp/tmp.Kkk7WtGw2G ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.RV0wtGAwwh +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.EPWbtZlo80 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.RV0wtGAwwh ++++ cat /tmp/tmp.EPWbtZlo80 ++++ rm /tmp/tmp.RV0wtGAwwh /tmp/tmp.EPWbtZlo80 ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rpdJYyx9uH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TwgMiIlgNw +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.rpdJYyx9uH +++ cat /tmp/tmp.TwgMiIlgNw +++ rm /tmp/tmp.rpdJYyx9uH /tmp/tmp.TwgMiIlgNw +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes 
--node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/4847fc78-fec9-46be-b1bf-a49a32fe3748 ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service +++ awk '{print $4}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.VNkQsjcjam +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.tM92kxOpKX ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.VNkQsjcjam ++++ cat /tmp/tmp.tM92kxOpKX ++++ rm /tmp/tmp.VNkQsjcjam /tmp/tmp.tM92kxOpKX ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.a0JkseLuOh +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.ThmpNrSbpt ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.a0JkseLuOh ++++ cat /tmp/tmp.ThmpNrSbpt ++++ rm /tmp/tmp.a0JkseLuOh /tmp/tmp.ThmpNrSbpt ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Vm3zi5YrVU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.8nX4v5X1ao +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Vm3zi5YrVU +++ cat /tmp/tmp.8nX4v5X1ao +++ rm /tmp/tmp.Vm3zi5YrVU /tmp/tmp.8nX4v5X1ao +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/043c5f6e-49f6-43a0-9b13-726ffc9c07f3 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.IllY72sJuD +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.4faIf4NuPg ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.IllY72sJuD 
++++ cat /tmp/tmp.4faIf4NuPg ++++ rm /tmp/tmp.IllY72sJuD /tmp/tmp.4faIf4NuPg ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.pqlAHiuAMq +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.D4Edzg8EvW ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.pqlAHiuAMq ++++ cat /tmp/tmp.D4Edzg8EvW ++++ rm /tmp/tmp.pqlAHiuAMq /tmp/tmp.D4Edzg8EvW ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.J2rKEsA0pR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.buWkL2HdyJ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.J2rKEsA0pR +++ cat /tmp/tmp.buWkL2HdyJ +++ rm /tmp/tmp.J2rKEsA0pR /tmp/tmp.buWkL2HdyJ +++ return 0 ++ for node_id in "${nodeList[@]}" ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_pmm_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/3c870ea2-fe66-4efc-85d2-6489967925e7 +++ awk '{print $4}' ++++ get_pmm_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.LwNotExnpz +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Enp6vYor63 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.LwNotExnpz ++++ cat /tmp/tmp.Enp6vYor63 ++++ rm /tmp/tmp.LwNotExnpz /tmp/tmp.Enp6vYor63 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.rz9T3CGINN +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.BR7luuRpmn ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in $(seq 0 2) ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.rz9T3CGINN ++++ cat /tmp/tmp.BR7luuRpmn ++++ rm /tmp/tmp.rz9T3CGINN /tmp/tmp.BR7luuRpmn ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.NIrivgQdsc ++++ 
mktemp +++ local LAST_ERR=/tmp/tmp.bq1OpjrLIc +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl exec -n monitoring-2-0-711 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.61.147.156/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.NIrivgQdsc +++ cat /tmp/tmp.bq1OpjrLIc +++ rm /tmp/tmp.NIrivgQdsc /tmp/tmp.bq1OpjrLIc +++ return 0 ++ echo + desc 'check customClusterName for pmm' + set +o xtrace ----------------------------------------------------------------------------------- check customClusterName for pmm ----------------------------------------------------------------------------------- + custom_name=custom-cluster-name + kubectl_bin patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":false}, {"op":"add","path":"/spec/pmm/customClusterName","value":custom-cluster-name}]' ++ mktemp + local LAST_OUT=/tmp/tmp.l5494cIsKV ++ mktemp + local LAST_ERR=/tmp/tmp.VhA7q4VVZx + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":false}, {"op":"add","path":"/spec/pmm/customClusterName","value":custom-cluster-name}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.l5494cIsKV perconaservermongodb.psmdb.percona.com/monitoring patched + cat /tmp/tmp.VhA7q4VVZx + rm /tmp/tmp.l5494cIsKV /tmp/tmp.VhA7q4VVZx + return 0 + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in $(seq 0 $last_pod) + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready...........OK + for i in $(seq 0 $last_pod) + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready..........OK + for i in $(seq 0 $last_pod) + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4I1nI7UODK +++ mktemp ++ local LAST_ERR=/tmp/tmp.jOVN2U4DAb ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4I1nI7UODK ++ cat /tmp/tmp.jOVN2U4DAb ++ rm /tmp/tmp.4I1nI7UODK /tmp/tmp.jOVN2U4DAb ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready.........OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JxTf8OKyYM +++ mktemp ++ local LAST_ERR=/tmp/tmp.H52HldCrpZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].nonvoting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.JxTf8OKyYM ++ cat /tmp/tmp.H52HldCrpZ ++ rm /tmp/tmp.JxTf8OKyYM /tmp/tmp.H52HldCrpZ ++ return 0 + [[ '' == \t\r\u\e ]] ++ kubectl_bin get psmdb monitoring -o 
'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.We3mF4G6oP +++ mktemp ++ local LAST_ERR=/tmp/tmp.zgbebHGGHr ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].hidden.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.We3mF4G6oP ++ cat /tmp/tmp.zgbebHGGHr ++ rm /tmp/tmp.We3mF4G6oP /tmp/tmp.zgbebHGGHr ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.............. ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.z3upK9D34k ++++ mktemp +++ local LAST_ERR=/tmp/tmp.g6oY24KMnu +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.z3upK9D34k +++ cat /tmp/tmp.g6oY24KMnu +++ rm /tmp/tmp.z3upK9D34k /tmp/tmp.g6oY24KMnu +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.tH8SNoxr98 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.cYz0Ybcae5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in $(seq 0 2) +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.tH8SNoxr98 +++ cat /tmp/tmp.cYz0Ybcae5 +++ rm /tmp/tmp.tH8SNoxr98 /tmp/tmp.cYz0Ybcae5 +++ return 0 ++ local ip=34.61.147.156 ++ '[' -n 34.61.147.156 -a 34.61.147.156 '!=' null ']' ++ echo 34.61.147.156 ++ return + curl -s -k -d '{"service_type":"MONGODB_SERVICE"}' https://admin:admin@34.61.147.156/v1/inventory/Services/List + check_custom_cluster_name monitoring-2-0-711-monitoring-mongos-0 /tmp/tmp.jvfWgN3tn4/pmm_service_list.json + local pod_service_name=monitoring-2-0-711-monitoring-mongos-0 + local pmm_services_file=/tmp/tmp.jvfWgN3tn4/pmm_service_list.json + echo 'Checking monitoring-2-0-711-monitoring-mongos-0' Checking monitoring-2-0-711-monitoring-mongos-0 ++ jq -r '.mongodb[] | select(.service_name=="monitoring-2-0-711-monitoring-mongos-0") | .cluster' /tmp/tmp.jvfWgN3tn4/pmm_service_list.json + pmm_service_cluster=custom-cluster-name + [[ custom-cluster-name != custom-cluster-name ]] + check_custom_cluster_name monitoring-2-0-711-monitoring-rs0-0 /tmp/tmp.jvfWgN3tn4/pmm_service_list.json + local pod_service_name=monitoring-2-0-711-monitoring-rs0-0 + local pmm_services_file=/tmp/tmp.jvfWgN3tn4/pmm_service_list.json + echo 'Checking monitoring-2-0-711-monitoring-rs0-0' Checking monitoring-2-0-711-monitoring-rs0-0 ++ jq -r '.mongodb[] | select(.service_name=="monitoring-2-0-711-monitoring-rs0-0") | .cluster' /tmp/tmp.jvfWgN3tn4/pmm_service_list.json + pmm_service_cluster=custom-cluster-name + [[ custom-cluster-name != custom-cluster-name ]] + check_custom_cluster_name monitoring-2-0-711-monitoring-cfg-0 /tmp/tmp.jvfWgN3tn4/pmm_service_list.json + local pod_service_name=monitoring-2-0-711-monitoring-cfg-0 + local pmm_services_file=/tmp/tmp.jvfWgN3tn4/pmm_service_list.json + echo 'Checking monitoring-2-0-711-monitoring-cfg-0' 
Checking monitoring-2-0-711-monitoring-cfg-0 ++ jq -r '.mongodb[] | select(.service_name=="monitoring-2-0-711-monitoring-cfg-0") | .cluster' /tmp/tmp.jvfWgN3tn4/pmm_service_list.json + pmm_service_cluster=custom-cluster-name + [[ custom-cluster-name != custom-cluster-name ]] + [[ -n '' ]] ++ kubectl_bin logs monitoring-rs0-0 pmm-client ++ grep -c 'cannot auto discover databases and collections' +++ mktemp ++ local LAST_OUT=/tmp/tmp.d4QRbQvEIa +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ip9aeqOccu ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl logs monitoring-rs0-0 pmm-client ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.d4QRbQvEIa ++ cat /tmp/tmp.Ip9aeqOccu ++ rm /tmp/tmp.d4QRbQvEIa /tmp/tmp.Ip9aeqOccu ++ return 0 + [[ 0 != 0 ]] + helm uninstall monitoring release "monitoring" uninstalled + destroy monitoring-2-0-711 + local namespace=monitoring-2-0-711 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_backups + desc 'Delete psmdb-backup' + set +o xtrace ----------------------------------------------------------------------------------- Delete psmdb-backup ----------------------------------------------------------------------------------- ++ kubectl_bin get psmdb-backup --no-headers ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.v0PcXDKwGB +++ mktemp ++ local LAST_ERR=/tmp/tmp.jgLlHSetBC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in $(seq 0 2) ++ set +e ++ kubectl get psmdb-backup --no-headers ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.v0PcXDKwGB ++ cat /tmp/tmp.jgLlHSetBC No resources found in monitoring-2-0-711 namespace. 
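
The three "Checking …" verifications a little above all come from one helper; a minimal sketch based on what the trace shows (the failure path is an assumption, since all three checks pass in this run and the real helper's error handling is not visible):

    # Compare the cluster label PMM reports for a service against the
    # customClusterName patched into the CR earlier (custom-cluster-name).
    # The JSON file was produced by POSTing {"service_type":"MONGODB_SERVICE"}
    # to /v1/inventory/Services/List on the PMM server.
    check_custom_cluster_name() {
        local pod_service_name=$1
        local pmm_services_file=$2
        echo "Checking ${pod_service_name}"
        local pmm_service_cluster
        pmm_service_cluster=$(jq -r --arg name "${pod_service_name}" \
            '.mongodb[] | select(.service_name==$name) | .cluster' \
            "${pmm_services_file}")
        if [[ ${pmm_service_cluster} != "${custom_name}" ]]; then
            # assumed: the real helper records the mismatch somehow
            echo "ERROR: ${pod_service_name} reports cluster ${pmm_service_cluster}, expected ${custom_name}" >&2
            return 1
        fi
    }

In this run mongos-0, rs0-0 and cfg-0 all report custom-cluster-name, so every [[ … != … ]] test above is false and the test proceeds to teardown.
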
++ rm /tmp/tmp.v0PcXDKwGB /tmp/tmp.jgLlHSetBC ++ return 0 + '[' 0 '!=' 0 ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.b19ejlzTnm ++ mktemp + local LAST_ERR=/tmp/tmp.nlitJ7hhk6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.b19ejlzTnm customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.nlitJ7hhk6 + rm /tmp/tmp.b19ejlzTnm /tmp/tmp.nlitJ7hhk6 + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/crd.yaml ++ grep -v '\-\-\-' grep: warning: stray \ before - grep: warning: stray \ before - + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.qBvqeneR9b ++ mktemp + local LAST_ERR=/tmp/tmp.zXi894Rn05 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qBvqeneR9b + cat /tmp/tmp.zXi894Rn05 + rm /tmp/tmp.qBvqeneR9b /tmp/tmp.zXi894Rn05 + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.2G0FiU4AnD ++ mktemp + local LAST_ERR=/tmp/tmp.RpLDsZnRrS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + 
break + cat /tmp/tmp.2G0FiU4AnD + cat /tmp/tmp.RpLDsZnRrS + rm /tmp/tmp.2G0FiU4AnD /tmp/tmp.RpLDsZnRrS + return 0 + for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-') + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.MUFudFVsiv ++ mktemp + local LAST_ERR=/tmp/tmp.FC1xw1MpG7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MUFudFVsiv + cat /tmp/tmp.FC1xw1MpG7 + rm /tmp/tmp.MUFudFVsiv /tmp/tmp.FC1xw1MpG7 + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.tQ5xrpFLKz ++ mktemp + local LAST_ERR=/tmp/tmp.aRcdawyhtp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-2219/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tQ5xrpFLKz clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.aRcdawyhtp + rm /tmp/tmp.tQ5xrpFLKz /tmp/tmp.aRcdawyhtp + return 0
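
The delete_crd pass that just finished repeats one pattern per CRD, all of it visible in the trace: strip finalizers from any leftover custom resources, tolerate failures, then wait for the CRD itself to disappear. A sketch of that loop (reconstructed from the trace; the real helper lives in the suite's shared functions file):

    for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v -- '---'); do
        # Every NAMESPACE/NAME row becomes $0/$1 of the sh -xc one-liner,
        # which clears metadata.finalizers so deletion cannot hang.
        kubectl get "${crd_name}" --all-namespaces -o wide \
            | grep -v NAMESPACE \
            | xargs -L 1 sh -xc 'kubectl patch '"${crd_name}"' -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' \
            || :
        kubectl wait --for=delete crd "${crd_name}"
    done

The "server doesn't have a resource type" errors above are expected here: the CRDs were already deleted with --wait=false moments earlier, and the trailing "+ :" in the trace shows the failure being swallowed, consistent with || :. The odd "kubectl patch … -n sh" attempt is xargs running the one-liner with no input rows, so $0 falls back to sh's default program name. The "grep: warning: stray \ before -" messages come from writing the separator pattern as '\-\-\-'; since - is not a regex metacharacter the backslashes are unnecessary, and grep -v -- '---' (used in the sketch) matches the same lines without the warning.
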
clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cluster-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace role.rbac.authorization.k8s.io "cert-manager-tokenrequest" deleted from cert-manager namespace role.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" deleted from cert-manager namespace rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted from kube-system namespace rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted from kube-system namespace rolebinding.rbac.authorization.k8s.io "cert-manager-tokenrequest" deleted from cert-manager namespace rolebinding.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" deleted from cert-manager namespace service "cert-manager-cainjector" deleted from cert-manager namespace service "cert-manager" deleted from cert-manager namespace service "cert-manager-webhook" deleted from cert-manager namespace mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted + cat /tmp/tmp.Bf59mhdmgx Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not 
found + sleep 0 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.9ZoZRARLur namespace "cert-manager" deleted + cat /tmp/tmp.Bf59mhdmgx Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": 
clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not 
found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": 
mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in $(seq 0 2) + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.9ZoZRARLur + cat /tmp/tmp.Bf59mhdmgx Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": 
clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server 
(NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.9ZoZRARLur + cat /tmp/tmp.Bf59mhdmgx Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-tokenrequest" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.19.3/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.9ZoZRARLur /tmp/tmp.Bf59mhdmgx + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace monitoring-2-0-711 + rm -rf /tmp/tmp.jvfWgN3tn4 + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.ygTCxII4cK + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ mktemp + local LAST_OUT=/tmp/tmp.PO4j1tT5NN ++ mktemp + local LAST_ERR=/tmp/tmp.Cjv87eHrGX + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.0RcBO9ZWKu + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace monitoring-2-0-711 + for i in $(seq 0 2) + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator