Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1552/e2e-tests/logs/monitoring-2-0.log WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 + create_infra monitoring-2-0-20438 + local ns=monitoring-2-0-20438 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1552/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.aGj1xJ9eQD ++ mktemp + local LAST_ERR=/tmp/tmp.y6ehxkCpPr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1552/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aGj1xJ9eQD customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.y6ehxkCpPr + rm /tmp/tmp.aGj1xJ9eQD /tmp/tmp.y6ehxkCpPr + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1552/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.VI3bOLgCPr ++ mktemp + local LAST_ERR=/tmp/tmp.4w3GMrQxI5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VI3bOLgCPr + cat /tmp/tmp.4w3GMrQxI5 + rm /tmp/tmp.VI3bOLgCPr /tmp/tmp.4w3GMrQxI5 + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type
"perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.R588dxg9WN ++ mktemp + local LAST_ERR=/tmp/tmp.XoUyl4waqe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.R588dxg9WN + cat /tmp/tmp.XoUyl4waqe + rm /tmp/tmp.R588dxg9WN /tmp/tmp.XoUyl4waqe + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.JZZewckMAO ++ mktemp + local LAST_ERR=/tmp/tmp.WKKuKyPIJ6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JZZewckMAO + cat /tmp/tmp.WKKuKyPIJ6 + rm /tmp/tmp.JZZewckMAO /tmp/tmp.WKKuKyPIJ6 + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1552/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.scYwpaE1Zm ++ mktemp + local LAST_ERR=/tmp/tmp.gWiuv2j9a8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1552/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.scYwpaE1Zm clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.gWiuv2j9a8 + rm /tmp/tmp.scYwpaE1Zm /tmp/tmp.gWiuv2j9a8 + return 0 + check_crd_for_deletion PR-1552-a008e872 + local git_tag=PR-1552-a008e872 ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1552-a008e872/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/bin/sed s/---//g ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.5YaoI7aCPx +++ mktemp ++ local LAST_ERR=/tmp/tmp.oPfISfh6yw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.5YaoI7aCPx ++ cat /tmp/tmp.oPfISfh6yw Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e 
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.5YaoI7aCPx ++ cat /tmp/tmp.oPfISfh6yw Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.5YaoI7aCPx ++ cat /tmp/tmp.oPfISfh6yw Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.5YaoI7aCPx ++ cat /tmp/tmp.oPfISfh6yw Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.5YaoI7aCPx /tmp/tmp.oPfISfh6yw ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + xargs kubectl delete ns + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.LeMiQ8mJIn ++ mktemp + local LAST_OUT=/tmp/tmp.aXosrg0lvS ++ mktemp + local LAST_ERR=/tmp/tmp.gAlpTdjjb9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + 
local LAST_ERR=/tmp/tmp.TsjWiDXNyS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LeMiQ8mJIn + cat /tmp/tmp.gAlpTdjjb9 + rm /tmp/tmp.LeMiQ8mJIn /tmp/tmp.gAlpTdjjb9 + return 0 namespace "cert-manager" deleted namespace "monitoring-2-0-31731" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aXosrg0lvS namespace "psmdb-operator" deleted + cat /tmp/tmp.TsjWiDXNyS + rm /tmp/tmp.aXosrg0lvS /tmp/tmp.TsjWiDXNyS + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.KzrZdukbNL ++ mktemp + local LAST_ERR=/tmp/tmp.HmZSbGv0QV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KzrZdukbNL + cat /tmp/tmp.HmZSbGv0QV + rm /tmp/tmp.KzrZdukbNL /tmp/tmp.HmZSbGv0QV + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.fs5N9uA9tw ++ mktemp + local LAST_ERR=/tmp/tmp.DlPQRSUMGJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fs5N9uA9tw namespace/psmdb-operator created + cat /tmp/tmp.DlPQRSUMGJ + rm /tmp/tmp.fs5N9uA9tw /tmp/tmp.DlPQRSUMGJ + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.8encC1s7hL +++ mktemp ++ local LAST_ERR=/tmp/tmp.byRnmnHefH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.8encC1s7hL ++ cat /tmp/tmp.byRnmnHefH ++ rm /tmp/tmp.8encC1s7hL /tmp/tmp.byRnmnHefH ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1552-a008e872-2-cluster3 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.hJ6X2zvEdM ++ mktemp + local LAST_ERR=/tmp/tmp.5wTgV8kw6e + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1552-a008e872-2-cluster3 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hJ6X2zvEdM Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1552-a008e872-2-cluster3" modified. 
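
Nearly every kubectl invocation in this trace runs through the test framework's kubectl_bin wrapper, which is what produces the recurring mktemp / LAST_OUT / LAST_ERR / seq 0 2 noise. A minimal sketch of what the wrapper appears to do, reconstructed from the '+' lines alone (the real helper lives in the framework's functions file; the details here are inferred, not authoritative):

# Run kubectl up to three times, capturing stdout/stderr to temp files;
# back off 0s, 4s, 8s between failed attempts, then surface the output.
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0 timeout=4 i
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" -eq 0 ]; then
            break                      # success: stop retrying
        fi
        cat "$LAST_OUT" "$LAST_ERR"    # show the failed attempt
        sleep $((timeout * i))         # 0s, then 4s, then 8s
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR"
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}

This is why each failing call below prints its error up to four times (once per attempt plus the final surfacing) before the caller sees a non-zero return.
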
+ cat /tmp/tmp.5wTgV8kw6e + rm /tmp/tmp.hJ6X2zvEdM /tmp/tmp.5wTgV8kw6e + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1552/e2e-tests/monitoring-2-0/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1552/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1552/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.tccYVkYmSl ++ mktemp + local LAST_ERR=/tmp/tmp.J8GzslDhEr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1552/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tccYVkYmSl customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.J8GzslDhEr + rm /tmp/tmp.tccYVkYmSl /tmp/tmp.J8GzslDhEr + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1552/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.5tpJQkC6kx ++ mktemp + local LAST_ERR=/tmp/tmp.wi4NFmF2fE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5tpJQkC6kx clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.wi4NFmF2fE + rm /tmp/tmp.5tpJQkC6kx /tmp/tmp.wi4NFmF2fE + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1552-a008e872") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1552/deploy/cw-operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.Z4saixMqFT ++ mktemp + local LAST_ERR=/tmp/tmp.VdoDIHbY0W + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Z4saixMqFT deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.VdoDIHbY0W + rm /tmp/tmp.Z4saixMqFT /tmp/tmp.VdoDIHbY0W + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.vfs4v7hUpF +++ mktemp ++ local LAST_ERR=/tmp/tmp.d6HJm5pVjO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.vfs4v7hUpF ++ cat /tmp/tmp.d6HJm5pVjO ++ rm /tmp/tmp.vfs4v7hUpF /tmp/tmp.d6HJm5pVjO ++ return 0 + wait_pod percona-server-mongodb-operator-5575c6d57c-68hc9 + local pod=percona-server-mongodb-operator-5575c6d57c-68hc9 + set +o xtrace waiting for pod/percona-server-mongodb-operator-5575c6d57c-68hc9 to be ready.OK + create_namespace monitoring-2-0-20438 + local namespace=monitoring-2-0-20438 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + '[' -n '' ']' ++ mktemp 
+ desc 'cleaned up old namespaces monitoring-2-0-20438' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces monitoring-2-0-20438 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace monitoring-2-0-20438 --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.Ul9xbyYxw5 + awk '{print$1}' ++ mktemp + local LAST_OUT=/tmp/tmp.r0CNELGsLC + local LAST_ERR=/tmp/tmp.NHasIcdKy7 + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.raNFErk3vU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-2-0-20438 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Ul9xbyYxw5 + cat /tmp/tmp.NHasIcdKy7 + rm /tmp/tmp.Ul9xbyYxw5 /tmp/tmp.NHasIcdKy7 + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.r0CNELGsLC + cat /tmp/tmp.raNFErk3vU + rm /tmp/tmp.r0CNELGsLC /tmp/tmp.raNFErk3vU + return 0 + kubectl_bin wait --for=delete namespace monitoring-2-0-20438 ++ mktemp + local LAST_OUT=/tmp/tmp.i0fm6EDF0l ++ mktemp + local LAST_ERR=/tmp/tmp.TQLIemli54 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace monitoring-2-0-20438 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.i0fm6EDF0l + cat /tmp/tmp.TQLIemli54 + rm /tmp/tmp.i0fm6EDF0l /tmp/tmp.TQLIemli54 + return 0 + desc 'create namespace monitoring-2-0-20438' + set +o xtrace ----------------------------------------------------------------------------------- create namespace monitoring-2-0-20438 ----------------------------------------------------------------------------------- + kubectl_bin create namespace monitoring-2-0-20438 ++ mktemp + local LAST_OUT=/tmp/tmp.tW9KeXxgHV ++ mktemp + local LAST_ERR=/tmp/tmp.Rtny5B2NK4 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace monitoring-2-0-20438 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tW9KeXxgHV namespace/monitoring-2-0-20438 created + cat /tmp/tmp.Rtny5B2NK4 + rm /tmp/tmp.tW9KeXxgHV /tmp/tmp.Rtny5B2NK4 + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.gqOvNiOIEc +++ mktemp ++ local LAST_ERR=/tmp/tmp.xz6Xr1aoty ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gqOvNiOIEc ++ cat /tmp/tmp.xz6Xr1aoty ++ rm /tmp/tmp.gqOvNiOIEc /tmp/tmp.xz6Xr1aoty ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1552-a008e872-2-cluster3 --namespace=monitoring-2-0-20438 ++ mktemp + local LAST_OUT=/tmp/tmp.cKVFFAMO26 ++ mktemp + local LAST_ERR=/tmp/tmp.kRFSeJs6mr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1552-a008e872-2-cluster3 --namespace=monitoring-2-0-20438 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cKVFFAMO26 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1552-a008e872-2-cluster3" modified. 
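
The 'cleaned up all old namespaces' steps above interleave the namespace-listing kubectl_bin call with the delete, which scrambles the trace; disentangled, the cleanup is this single pipeline (filter copied verbatim from the trace):

# Delete every namespace except system ones and the operator's own.
# When the filter matches nothing, xargs still invokes `kubectl delete ns`
# with no arguments, which is the source of the harmless
# "error: resource(s) were provided, but no name was specified" above.
kubectl get ns \
    | egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' \
    | awk '{print $1}' \
    | xargs kubectl delete ns

With GNU xargs, adding -r (--no-run-if-empty) would suppress that spurious error instead of letting it surface in the log.
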
+ cat /tmp/tmp.kRFSeJs6mr + rm /tmp/tmp.cKVFFAMO26 /tmp/tmp.kRFSeJs6mr + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.gh8v3w4LNg ++ mktemp + local LAST_ERR=/tmp/tmp.tdLLxVr8Mc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.gh8v3w4LNg namespace/cert-manager created + cat /tmp/tmp.tdLLxVr8Mc + rm /tmp/tmp.gh8v3w4LNg /tmp/tmp.tdLLxVr8Mc + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.vnpQp4osCi ++ mktemp + local LAST_ERR=/tmp/tmp.LHIV0ab8iV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vnpQp4osCi namespace/cert-manager labeled + cat /tmp/tmp.LHIV0ab8iV + rm /tmp/tmp.vnpQp4osCi /tmp/tmp.LHIV0ab8iV + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.ofEo2Ymx3L ++ mktemp + local LAST_ERR=/tmp/tmp.Fs7640guqs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ofEo2Ymx3L namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged 
clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews configured role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection configured rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.Fs7640guqs Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
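
Stripped of the retry scaffolding, the cert-manager deployment in this step reduces to the following commands, all visible verbatim in the trace (the readiness wait appears just below):

# Install cert-manager v1.14.5 from the upstream release manifest.
kubectl create namespace cert-manager
kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true
# --validate=false skips client-side schema validation, since the manifest
# carries its own CRDs that the cluster does not know about yet.
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml --validate=false
kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready
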
+ rm /tmp/tmp.ofEo2Ymx3L /tmp/tmp.Fs7640guqs + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.OihqCihGic ++ mktemp + local LAST_ERR=/tmp/tmp.gGYPY8Bjvb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OihqCihGic pod/cert-manager-5658d944df-8wv8f condition met pod/cert-manager-cainjector-cb99ff845-mhgqz condition met pod/cert-manager-webhook-7fd74b8dc7-h8829 condition met + cat /tmp/tmp.gGYPY8Bjvb + rm /tmp/tmp.OihqCihGic /tmp/tmp.gGYPY8Bjvb + return 0 + sleep 120 + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + deploy_pmm_server + helm uninstall monitoring Error: uninstall: Release not loaded: monitoring: release: not found + : + helm repo remove stable "stable" has been removed from your repositories + helm repo add stable https://charts.helm.sh/stable "stable" has been added to your repositories + [[ -n '' ]] + retry 10 60 helm install monitoring --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz + local max=10 + local delay=60 + shift 2 + local n=1 + helm install monitoring --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz NAME: monitoring LAST DEPLOYED: Tue Jun 4 19:58:14 2024 NAMESPACE: monitoring-2-0-20438 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster: endpoint: https://monitoring-service.monitoring-2-0-20438.svc.cluster.local:443 login: admin password: admin + sleep 20 + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.dx6T3Zeo8A ++ mktemp + local LAST_ERR=/tmp/tmp.uCSR84jEwe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.dx6T3Zeo8A + cat /tmp/tmp.uCSR84jEwe Error from server: error dialing backend: dial tcp 10.210.0.13:10250: i/o timeout + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.dx6T3Zeo8A + cat /tmp/tmp.uCSR84jEwe Error from server: error dialing backend: dial tcp 10.210.0.13:10250: i/o timeout + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.dx6T3Zeo8A + cat /tmp/tmp.uCSR84jEwe Error from server: error dialing backend: dial tcp 10.210.0.13:10250: i/o timeout + sleep 8 + cat /tmp/tmp.dx6T3Zeo8A + cat /tmp/tmp.uCSR84jEwe Error from server: error dialing backend: dial tcp 10.210.0.13:10250: i/o timeout + rm /tmp/tmp.dx6T3Zeo8A /tmp/tmp.uCSR84jEwe + return 1 + echo 'Retry 0' Retry 0 + sleep 5 + let retry+=1 + '[' 1 -ge 20 ']' + 
kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
[Retries 1 through 19 repeat this probe verbatim; every kubectl exec attempt inside each kubectl_bin call fails with: Error from server: error dialing backend: dial tcp 10.210.0.13:10250: i/o timeout, followed by 'Retry N', sleep 5, and an increment of the retry counter.]
+ '[' 20 -ge 20 ']' + echo 'Max retry count 20 reached. Pmm-server can'\''t start' Max retry count 20 reached. Pmm-server can't start + exit 1
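
For reference, the outer loop behind the 'Retry N' messages and the final failure can be reconstructed from the trace roughly as follows (a sketch; the counter name and control flow are inferred from the '+' lines):

# Wait for the PMM server pod to start postgres, probing every 5s,
# for at most 20 attempts. The trace routes the probe through the
# kubectl_bin wrapper; shown direct here for brevity.
retry=0
until kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'; do
    echo "Retry $retry"
    sleep 5
    let retry+=1
    if [ "$retry" -ge 20 ]; then
        echo "Max retry count 20 reached. Pmm-server can't start"
        exit 1
    fi
done

Note that the postgres check never actually ran: every kubectl exec died one hop earlier, with the API server unable to dial the kubelet on the pod's node (dial tcp 10.210.0.13:10250: i/o timeout). The failure is therefore node or network infrastructure, not PMM server startup itself.
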