Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/e2e-tests/logs/monitoring-2-0.log WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 + create_infra monitoring-2-0-7455 + local ns=monitoring-2-0-7455 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.3P9LrPj40y ++ mktemp + local LAST_ERR=/tmp/tmp.WeIojf36at + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3P9LrPj40y customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.WeIojf36at + rm /tmp/tmp.3P9LrPj40y /tmp/tmp.WeIojf36at + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.SFMGiH5dwI ++ mktemp + local LAST_ERR=/tmp/tmp.B4EPqbMPKd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SFMGiH5dwI + cat /tmp/tmp.B4EPqbMPKd + rm /tmp/tmp.SFMGiH5dwI /tmp/tmp.B4EPqbMPKd + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' E0709 07:46:47.640149 2133 memcache.go:287] couldn't get resource list for
psmdb.percona.com/v1-11-0: the server could not find the requested resource E0709 07:46:47.640307 2133 memcache.go:287] couldn't get resource list for psmdb.percona.com/v1-12-0: the server could not find the requested resource E0709 07:46:47.671173 2133 memcache.go:287] couldn't get resource list for psmdb.percona.com/v1: the server could not find the requested resource E0709 07:46:47.722238 2133 memcache.go:287] couldn't get resource list for psmdb.percona.com/v1-10-0: the server could not find the requested resource error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.PyAHzLW23D ++ mktemp + local LAST_ERR=/tmp/tmp.eX0It7xlXa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PyAHzLW23D + cat /tmp/tmp.eX0It7xlXa + rm /tmp/tmp.PyAHzLW23D /tmp/tmp.eX0It7xlXa + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.ACOVnQ9TUF ++ mktemp + local LAST_ERR=/tmp/tmp.KjPoRy0HVY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ACOVnQ9TUF + cat /tmp/tmp.KjPoRy0HVY + rm /tmp/tmp.ACOVnQ9TUF /tmp/tmp.KjPoRy0HVY + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.zYJulF3m8P ++ mktemp + local LAST_ERR=/tmp/tmp.bq6lezeLVr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zYJulF3m8P clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.bq6lezeLVr + rm /tmp/tmp.zYJulF3m8P /tmp/tmp.bq6lezeLVr + return 0 + check_crd_for_deletion PR-1567-b27e0b5e + local git_tag=PR-1567-b27e0b5e ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1567-b27e0b5e/deploy/crd.yaml ++ /usr/bin/sed s/---//g ++ yq eval .metadata.name ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ 
kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ClqJGH9xd3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.dPIwDMrHSx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.ClqJGH9xd3 ++ cat /tmp/tmp.dPIwDMrHSx Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.ClqJGH9xd3 ++ cat /tmp/tmp.dPIwDMrHSx Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.ClqJGH9xd3 ++ cat /tmp/tmp.dPIwDMrHSx Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.ClqJGH9xd3 ++ cat /tmp/tmp.dPIwDMrHSx Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.ClqJGH9xd3 /tmp/tmp.dPIwDMrHSx ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + '[' -n '' ']' + desc 'cleaned 
up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + xargs kubectl delete ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.zkLrrVRH6d + awk '{print$1}' ++ mktemp + local LAST_OUT=/tmp/tmp.tqxsjztppo ++ mktemp + local LAST_ERR=/tmp/tmp.mwAnmr2VUh + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.vWvd7GQz6d + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tqxsjztppo + cat /tmp/tmp.vWvd7GQz6d + rm /tmp/tmp.tqxsjztppo /tmp/tmp.vWvd7GQz6d + return 0 namespace "cert-manager" deleted namespace "monitoring-2-0-30600" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zkLrrVRH6d namespace "psmdb-operator" deleted + cat /tmp/tmp.mwAnmr2VUh + rm /tmp/tmp.zkLrrVRH6d /tmp/tmp.mwAnmr2VUh + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.6S5lsMDqOZ ++ mktemp + local LAST_ERR=/tmp/tmp.N8939WBzAw + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6S5lsMDqOZ + cat /tmp/tmp.N8939WBzAw + rm /tmp/tmp.6S5lsMDqOZ /tmp/tmp.N8939WBzAw + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.cSYpHbqjnf ++ mktemp + local LAST_ERR=/tmp/tmp.wxUwPjnt5i + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cSYpHbqjnf namespace/psmdb-operator created + cat /tmp/tmp.wxUwPjnt5i + rm /tmp/tmp.cSYpHbqjnf /tmp/tmp.wxUwPjnt5i + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.T9Ut3EYqVW +++ mktemp ++ local LAST_ERR=/tmp/tmp.MiXGMNrvVF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.T9Ut3EYqVW ++ cat /tmp/tmp.MiXGMNrvVF ++ rm /tmp/tmp.T9Ut3EYqVW /tmp/tmp.MiXGMNrvVF ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1567-b27e0b5e-7-cluster1 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.aE8ICeu2Z4 ++ mktemp + local LAST_ERR=/tmp/tmp.NAWy7oNv7G + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1567-b27e0b5e-7-cluster1 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aE8ICeu2Z4 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1567-b27e0b5e-7-cluster1" modified. 
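[editor's note] The walls of `+ kubectl ...` / `++ mktemp` / `+ cat /tmp/tmp.*` / `+ rm` lines throughout this log are the xtrace expansion of the suite's kubectl_bin retry wrapper. A minimal sketch of the pattern, reconstructed from the trace rather than taken from the suite's source (the 0s/4s/8s back-off matches the `sleep 0`, `sleep 4`, `sleep 8` lines visible in the failed `kubectl get crd/null` attempts above):

kubectl_bin() {
    # Capture each attempt's stdout/stderr in throwaway temp files.
    local LAST_OUT LAST_ERR exit_status=0 timeout=4
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        # Stop on the first success; otherwise back off 0s, then 4s, then 8s.
        [ "$exit_status" -eq 0 ] && break
        sleep $((timeout * i))
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}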
+ cat /tmp/tmp.NAWy7oNv7G + rm /tmp/tmp.aE8ICeu2Z4 /tmp/tmp.NAWy7oNv7G + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/e2e-tests/monitoring-2-0/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.yzfKfi4C9D ++ mktemp + local LAST_ERR=/tmp/tmp.TVG5Wk5N7h + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yzfKfi4C9D customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.TVG5Wk5N7h + rm /tmp/tmp.yzfKfi4C9D /tmp/tmp.TVG5Wk5N7h + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.JNYpF7j0JF ++ mktemp + local LAST_ERR=/tmp/tmp.Oeh4Xlnhp3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JNYpF7j0JF clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.Oeh4Xlnhp3 + rm /tmp/tmp.JNYpF7j0JF /tmp/tmp.Oeh4Xlnhp3 + return 0 + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1567-b27e0b5e") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/deploy/cw-operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.uRc4CK3ytf ++ mktemp + local LAST_ERR=/tmp/tmp.np7tf4ANct + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uRc4CK3ytf deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.np7tf4ANct + rm /tmp/tmp.uRc4CK3ytf /tmp/tmp.np7tf4ANct + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.ncZWKwasuN +++ mktemp ++ local LAST_ERR=/tmp/tmp.H7yBvnX9gM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ncZWKwasuN ++ cat /tmp/tmp.H7yBvnX9gM ++ rm /tmp/tmp.ncZWKwasuN /tmp/tmp.H7yBvnX9gM ++ return 0 + wait_pod percona-server-mongodb-operator-6cfcdf54dd-zx7s4 + local pod=percona-server-mongodb-operator-6cfcdf54dd-zx7s4 + set +o xtrace waiting for pod/percona-server-mongodb-operator-6cfcdf54dd-zx7s4 to be ready.OK + create_namespace monitoring-2-0-7455 + local namespace=monitoring-2-0-7455 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + '[' -n '' ']' + xargs 
kubectl delete ns + desc 'cleaned up old namespaces monitoring-2-0-7455' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces monitoring-2-0-7455 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace monitoring-2-0-7455 --ignore-not-found ++ mktemp + awk '{print$1}' + local LAST_OUT=/tmp/tmp.30NYpImhqN ++ mktemp + local LAST_ERR=/tmp/tmp.Eqbq3uqTEE + local exit_status=0 + local timeout=4 ++ mktemp ++ seq 0 2 + local LAST_OUT=/tmp/tmp.UylG08hCaB + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-2-0-7455 --ignore-not-found ++ mktemp + local LAST_ERR=/tmp/tmp.OzHTsEgLiP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.UylG08hCaB + cat /tmp/tmp.OzHTsEgLiP + rm /tmp/tmp.UylG08hCaB /tmp/tmp.OzHTsEgLiP + return 0 error: resource(s) were provided, but no name was specified + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.30NYpImhqN + cat /tmp/tmp.Eqbq3uqTEE + rm /tmp/tmp.30NYpImhqN /tmp/tmp.Eqbq3uqTEE + return 0 + kubectl_bin wait --for=delete namespace monitoring-2-0-7455 ++ mktemp + local LAST_OUT=/tmp/tmp.ii1dlfYJ8I ++ mktemp + local LAST_ERR=/tmp/tmp.SnQsMkJSE3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace monitoring-2-0-7455 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ii1dlfYJ8I + cat /tmp/tmp.SnQsMkJSE3 + rm /tmp/tmp.ii1dlfYJ8I /tmp/tmp.SnQsMkJSE3 + return 0 + desc 'create namespace monitoring-2-0-7455' + set +o xtrace ----------------------------------------------------------------------------------- create namespace monitoring-2-0-7455 ----------------------------------------------------------------------------------- + kubectl_bin create namespace monitoring-2-0-7455 ++ mktemp + local LAST_OUT=/tmp/tmp.RpEbDi22Ew ++ mktemp + local LAST_ERR=/tmp/tmp.cEzdQ0rAHj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace monitoring-2-0-7455 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RpEbDi22Ew namespace/monitoring-2-0-7455 created + cat /tmp/tmp.cEzdQ0rAHj + rm /tmp/tmp.RpEbDi22Ew /tmp/tmp.cEzdQ0rAHj + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.SiCk4sFqX8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.LmElLIc3dT ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SiCk4sFqX8 ++ cat /tmp/tmp.LmElLIc3dT ++ rm /tmp/tmp.SiCk4sFqX8 /tmp/tmp.LmElLIc3dT ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1567-b27e0b5e-7-cluster1 --namespace=monitoring-2-0-7455 ++ mktemp + local LAST_OUT=/tmp/tmp.IDaFoXsQTH ++ mktemp + local LAST_ERR=/tmp/tmp.95OpJkcgru + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1567-b27e0b5e-7-cluster1 --namespace=monitoring-2-0-7455 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.IDaFoXsQTH Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1567-b27e0b5e-7-cluster1" modified. 
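[editor's note] Every `error: resource(s) were provided, but no name was specified` in the destroy_chaos_mesh section is expected: the cleanup greps cluster-scoped objects for a chaos-mesh pattern, finds nothing on a fresh cluster, and hands `kubectl delete` an empty name list; the lone `+ :` that follows is the no-op swallowing the non-zero exit so `set -e` does not abort the test. The idiom, approximately (the helper name is illustrative, not from the suite):

# Delete all objects of a kind whose name matches a pattern; tolerate "none found".
delete_matching() {
    local kind=$1 pattern=$2
    timeout 30 kubectl delete "$kind" $(kubectl get "$kind" | grep "$pattern" | awk '{print $1}') || :
}
delete_matching mutatingwebhookconfiguration chaos-mesh
delete_matching validatingwebhookconfiguration chaos-mesh
delete_matching crd chaos-mesh.org
delete_matching clusterrolebinding chaos-mesh
delete_matching clusterrole chaos-mesh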
+ cat /tmp/tmp.95OpJkcgru + rm /tmp/tmp.IDaFoXsQTH /tmp/tmp.95OpJkcgru + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.JHKTJAm5VU ++ mktemp + local LAST_ERR=/tmp/tmp.Y7fbBFFPP3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.JHKTJAm5VU namespace/cert-manager created + cat /tmp/tmp.Y7fbBFFPP3 + rm /tmp/tmp.JHKTJAm5VU /tmp/tmp.Y7fbBFFPP3 + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.turCNonFbh ++ mktemp + local LAST_ERR=/tmp/tmp.H59OUnpLLi + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.turCNonFbh namespace/cert-manager labeled + cat /tmp/tmp.H59OUnpLLi + rm /tmp/tmp.turCNonFbh /tmp/tmp.H59OUnpLLi + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.XZSMHftePc ++ mktemp + local LAST_ERR=/tmp/tmp.2O3OFQRsYt + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XZSMHftePc namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged 
clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews configured role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection configured rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.2O3OFQRsYt Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
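[editor's note] The last-applied-configuration warning above is harmless here: the cert-manager namespace was created moments earlier with a plain `kubectl create namespace`, which does not record the annotation that `kubectl apply` uses for its three-way merge, so the first apply of the upstream cert-manager.yaml has to patch it in. One way to sidestep the warning (an alternative, not what this suite does) is to record the configuration at creation time:

# --save-config writes kubectl.kubernetes.io/last-applied-configuration,
# so a later `kubectl apply` over the namespace merges cleanly.
kubectl create namespace cert-manager --save-config
kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true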
+ rm /tmp/tmp.XZSMHftePc /tmp/tmp.2O3OFQRsYt + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.cuxhXdDhO9 ++ mktemp + local LAST_ERR=/tmp/tmp.4VbVcqrKqJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.cuxhXdDhO9 pod/cert-manager-5658d944df-r427k condition met pod/cert-manager-cainjector-cb99ff845-6g89b condition met pod/cert-manager-webhook-7fd74b8dc7-fmc2c condition met + cat /tmp/tmp.4VbVcqrKqJ + rm /tmp/tmp.cuxhXdDhO9 /tmp/tmp.4VbVcqrKqJ + return 0 + sleep 120 + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + deploy_pmm_server + helm uninstall monitoring Error: uninstall: Release not loaded: monitoring: release: not found + : + helm repo remove stable "stable" has been removed from your repositories + helm repo add stable https://charts.helm.sh/stable "stable" has been added to your repositories + [[ -n '' ]] + retry 10 60 helm install monitoring --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz + local max=10 + local delay=60 + shift 2 + local n=1 + helm install monitoring --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz NAME: monitoring LAST DEPLOYED: Tue Jul 9 07:50:12 2024 NAMESPACE: monitoring-2-0-7455 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster: endpoint: https://monitoring-service.monitoring-2-0-7455.svc.cluster.local:443 login: admin password: admin + sleep 20 + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.jpHCDBYgBJ ++ mktemp + local LAST_ERR=/tmp/tmp.bRHxRx5JsN + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.jpHCDBYgBJ + cat /tmp/tmp.bRHxRx5JsN command terminated with exit code 1 + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jpHCDBYgBJ + cat /tmp/tmp.bRHxRx5JsN + rm /tmp/tmp.jpHCDBYgBJ /tmp/tmp.bRHxRx5JsN + return 0 + cluster=monitoring + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/e2e-tests/monitoring-2-0/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.yv9SKlclq6 ++ mktemp + local LAST_ERR=/tmp/tmp.d4snq3KWzY + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply 
-f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/e2e-tests/monitoring-2-0/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yv9SKlclq6 secret/some-users created secret/some-users unchanged + cat /tmp/tmp.d4snq3KWzY + rm /tmp/tmp.yv9SKlclq6 /tmp/tmp.d4snq3KWzY + return 0 + yq '.spec.template.spec.volumes[0].secret.secretName="monitoring-ssl"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/e2e-tests/conf/client_with_tls.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.kI8D530VjZ ++ mktemp + local LAST_ERR=/tmp/tmp.wb8ZDLEDMU + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kI8D530VjZ deployment.apps/psmdb-client created + cat /tmp/tmp.wb8ZDLEDMU + rm /tmp/tmp.kI8D530VjZ /tmp/tmp.wb8ZDLEDMU + return 0 + sleep 90 + desc 'create first PSMDB cluster monitoring' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster monitoring ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' ++ mktemp + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1567-b27e0b5e"' + local LAST_OUT=/tmp/tmp.QJQlFqIncn + yq eval '.spec.upgradeOptions.apply="Never"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' ++ mktemp + local LAST_ERR=/tmp/tmp.tio4nDAXpj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QJQlFqIncn perconaservermongodb.psmdb.percona.com/monitoring created + cat /tmp/tmp.tio4nDAXpj + rm /tmp/tmp.QJQlFqIncn /tmp/tmp.tio4nDAXpj + return 0 + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready.............OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready.............OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.UKlyZS9kR4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.yWakJgEpUV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring -o 
'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.UKlyZS9kR4 ++ cat /tmp/tmp.yWakJgEpUV ++ rm /tmp/tmp.UKlyZS9kR4 /tmp/tmp.yWakJgEpUV ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready..............OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9iadwXBlkp +++ mktemp ++ local LAST_ERR=/tmp/tmp.tLNOdXf8HC ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.9iadwXBlkp ++ cat /tmp/tmp.tLNOdXf8HC ++ rm /tmp/tmp.9iadwXBlkp /tmp/tmp.tLNOdXf8HC ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness......................... + desc 'check if pmm-client container is not enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container is not enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-rs0 -no-pmm + local resource=statefulset/monitoring-rs0 + local postfix=-no-pmm + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml + local new_result=/tmp/tmp.d3W23Cx3SZ/statefulset_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-7455", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.oQH3aDXCAJ ++ mktemp + local LAST_ERR=/tmp/tmp.WFjhkTlsmK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oQH3aDXCAJ + cat /tmp/tmp.WFjhkTlsmK + rm /tmp/tmp.oQH3aDXCAJ /tmp/tmp.WFjhkTlsmK + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.d3W23Cx3SZ/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.d3W23Cx3SZ/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.d3W23Cx3SZ/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml /tmp/tmp.d3W23Cx3SZ/statefulset_monitoring-rs0.yml + sleep 10 + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-7455 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-7455 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.lffKFtOtH3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.myrVWz66FN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.lffKFtOtH3 ++ cat /tmp/tmp.myrVWz66FN ++ rm /tmp/tmp.lffKFtOtH3 /tmp/tmp.myrVWz66FN ++ return 0 + local client_container=psmdb-client-6cd48df8b6-sh52w + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-6cd48df8b6-sh52w -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-7455.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.eGkpMRroNr ++ mktemp + local LAST_ERR=/tmp/tmp.QaE2b2h4HQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6cd48df8b6-sh52w -- bash -c 'printf 
'\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-7455.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eGkpMRroNr Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-7455.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-07-09T07:54:34.691Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("88b64a4a-60a3-49bb-89d4-28b219149978") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.QaE2b2h4HQ + rm /tmp/tmp.eGkpMRroNr /tmp/tmp.QaE2b2h4HQ + return 0 + run_mongos 'sh.enableSharding("myApp")' clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-7455 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=sh.enableSharding("myApp")' + local uri=clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-7455 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TAc3I7McFs +++ mktemp ++ local LAST_ERR=/tmp/tmp.9CiFQvZJMV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TAc3I7McFs ++ cat /tmp/tmp.9CiFQvZJMV ++ rm /tmp/tmp.TAc3I7McFs /tmp/tmp.9CiFQvZJMV ++ return 0 + local client_container=psmdb-client-6cd48df8b6-sh52w + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-6cd48df8b6-sh52w -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-7455.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.sxfk2byNmg ++ mktemp + local LAST_ERR=/tmp/tmp.r9lwV74XLE + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6cd48df8b6-sh52w -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-7455.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sxfk2byNmg Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-7455.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-07-09T07:54:37.186Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("316939c0-e0e8-417e-9817-f33d38da8e88") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match { "ok" : 1, 
"$clusterTime" : { "clusterTime" : Timestamp(1720511677, 9), "signature" : { "hash" : BinData(0,"LkerMWxDg/Mc/Hn57qWSnWLTU74="), "keyId" : NumberLong("7389541037208764430") } }, "operationTime" : Timestamp(1720511677, 3) } bye + cat /tmp/tmp.r9lwV74XLE + rm /tmp/tmp.sxfk2byNmg /tmp/tmp.r9lwV74XLE + return 0 + insert_data_mongos 100500 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local data=100500 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@monitoring-mongos.monitoring-2-0-7455 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-7455 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RZ89NMYlpx +++ mktemp ++ local LAST_ERR=/tmp/tmp.E0C2vkGxf7 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.RZ89NMYlpx ++ cat /tmp/tmp.E0C2vkGxf7 ++ rm /tmp/tmp.RZ89NMYlpx /tmp/tmp.E0C2vkGxf7 ++ return 0 + local client_container=psmdb-client-6cd48df8b6-sh52w + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-6cd48df8b6-sh52w -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-7455.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.mE7vMsWCuh ++ mktemp + local LAST_ERR=/tmp/tmp.PrK8BgPtwB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6cd48df8b6-sh52w -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-7455.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mE7vMsWCuh Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-7455.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-07-09T07:54:39.727Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("67c6f799-3533-4e1e-ae2b-7597d3d9f012") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.PrK8BgPtwB + rm /tmp/tmp.mE7vMsWCuh /tmp/tmp.PrK8BgPtwB + return 0 + insert_data_mongos 100600 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local data=100600 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + run_mongos 'use myApp\n db.test.insert({ x: 100600 })' myApp:myPass@monitoring-mongos.monitoring-2-0-7455 '' '' '--tlsCertificateKeyFile /tmp/tls.pem 
--tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=use myApp\n db.test.insert({ x: 100600 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-7455 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tXo6hAHNgt +++ mktemp ++ local LAST_ERR=/tmp/tmp.4KR1yLYjEd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.tXo6hAHNgt ++ cat /tmp/tmp.4KR1yLYjEd ++ rm /tmp/tmp.tXo6hAHNgt /tmp/tmp.4KR1yLYjEd ++ return 0 + local client_container=psmdb-client-6cd48df8b6-sh52w + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-6cd48df8b6-sh52w -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-7455.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.TRRIhQcv7P ++ mktemp + local LAST_ERR=/tmp/tmp.2hgNXjFFpe + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6cd48df8b6-sh52w -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-7455.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TRRIhQcv7P Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-7455.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-07-09T07:54:42.202Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("01024e45-f0a9-4d9e-b385-ae7b643ba53b") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.2hgNXjFFpe + rm /tmp/tmp.TRRIhQcv7P /tmp/tmp.2hgNXjFFpe + return 0 + insert_data_mongos 100700 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local data=100700 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + run_mongos 'use myApp\n db.test.insert({ x: 100700 })' myApp:myPass@monitoring-mongos.monitoring-2-0-7455 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=use myApp\n db.test.insert({ x: 100700 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-7455 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WH4a9MP4Rp +++ mktemp ++ local LAST_ERR=/tmp/tmp.Fpguzd0A19 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.WH4a9MP4Rp ++ cat /tmp/tmp.Fpguzd0A19 ++ rm 
/tmp/tmp.WH4a9MP4Rp /tmp/tmp.Fpguzd0A19 ++ return 0 + local client_container=psmdb-client-6cd48df8b6-sh52w + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-6cd48df8b6-sh52w -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-7455.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.6KpB0UOknB ++ mktemp + local LAST_ERR=/tmp/tmp.uOePsKe8Ue + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6cd48df8b6-sh52w -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-7455.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6KpB0UOknB Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-7455.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-07-09T07:54:44.648Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("4050ba62-738c-4606-bf22-ac3ad1c09454") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.uOePsKe8Ue + rm /tmp/tmp.6KpB0UOknB /tmp/tmp.uOePsKe8Ue + return 0 + desc 'add PMM_SERVER_API_KEY for secret some-users' + set +o xtrace ----------------------------------------------------------------------------------- add PMM_SERVER_API_KEY for secret some-users ----------------------------------------------------------------------------------- ++ jq .key +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.8fPH7xYV0N +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.5ibh3WR4Tt ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.8fPH7xYV0N ++++ cat /tmp/tmp.5ibh3WR4Tt ++++ rm /tmp/tmp.8fPH7xYV0N /tmp/tmp.5ibh3WR4Tt ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' +++++ mktemp ++++ sed -e 's/^"//; s/"$//;' ++++ local LAST_OUT=/tmp/tmp.xkJfYKF6Dd +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.gxDbNRzRu3 ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.xkJfYKF6Dd ++++ cat /tmp/tmp.gxDbNRzRu3 ++++ rm /tmp/tmp.xkJfYKF6Dd /tmp/tmp.gxDbNRzRu3 ++++ return 0 +++ local ip=34.44.143.255 +++ '[' -n 34.44.143.255 -a 34.44.143.255 '!=' null ']' +++ echo 34.44.143.255 +++ return ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' 
+ desc 'check if all 3 Pods started'
-----------------------------------------------------------------------------------
check if all 3 Pods started
-----------------------------------------------------------------------------------
+ wait_for_running monitoring-rs0 3
waiting for pod/monitoring-rs0-0 to be ready.OK
waiting for pod/monitoring-rs0-1 to be ready.OK
++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
+ [[ '' == \t\r\u\e ]]
waiting for pod/monitoring-rs0-2 to be ready.OK
++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}'
+ [[ '' == \t\r\u\e ]]
+ sleep 10
Waiting for cluster readyness...............................................................................................
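Every kubectl call in this log runs through the suite's kubectl_bin wrapper, which is what produces the repeated mktemp/LAST_OUT/LAST_ERR and seq 0 2 scaffolding in the raw trace. Reconstructed from that trace as a sketch (the real helper in the e2e test functions may differ in detail):

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)    # captured stdout, replayed below
        LAST_ERR=$(mktemp)    # captured stderr, replayed below
        for i in $(seq 0 2); do              # up to three attempts
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            [ "$exit_status" -eq 0 ] && break
            cat "$LAST_OUT" "$LAST_ERR"
            sleep $((timeout * i))           # back off 0s, 4s, 8s, as in the trace
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }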
+ sleep 90
+ desc 'check if pmm-client container enabled'
-----------------------------------------------------------------------------------
check if pmm-client container enabled
-----------------------------------------------------------------------------------
+ compare_kubectl statefulset/monitoring-rs0
+ local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml
+ local new_result=/tmp/tmp.d3W23Cx3SZ/statefulset_monitoring-rs0.yml
+ kubectl get -o yaml statefulset/monitoring-rs0
+ yq eval '
    del(.metadata.ownerReferences[].apiVersion) |
    del(.metadata.managedFields) |
    del(.. | select(has("creationTimestamp")).creationTimestamp) |
    del(.. | select(has("namespace")).namespace) |
    del(.. | select(has("uid")).uid) |
    del(.metadata.resourceVersion) |
    del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |
    del(.metadata.selfLink) |
    del(.metadata.annotations."cloud.google.com/neg") |
    del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |
    del(.. | select(has("image")).image) |
    del(.. | select(has("clusterIP")).clusterIP) |
    del(.. | select(has("clusterIPs")).clusterIPs) |
    del(.. | select(has("dataSource")).dataSource) |
    del(.. | select(has("procMount")).procMount) |
    del(.. | select(has("storageClassName")).storageClassName) |
    del(.. | select(has("finalizers")).finalizers) |
    del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |
    del(.. | select(has("volumeName")).volumeName) |
    del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |
    del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |
    del(.spec.volumeMode) |
    del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |
    del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |
    del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |
    del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |
    del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |
    del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |
    del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |
    del(.. | select(has("nodePort")).nodePort) |
    del(.status) |
    (.. | select(tag == "!!str")) |= sub("monitoring-2-0-7455", "NAME_SPACE") |
    del(.spec.volumeClaimTemplates[].apiVersion) |
    del(.spec.volumeClaimTemplates[].kind) |
    del(.spec.ipFamilies) |
    del(.spec.ipFamilyPolicy) |
    (.. | select(. == "extensions/v1beta1")) = "apps/v1" |
    (.. | select(. == "batch/v1beta1")) = "batch/v1"
  ' -
+ yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.d3W23Cx3SZ/statefulset_monitoring-rs0.yml
+ version_gt 1.22
++ echo '1.26 >= 1.22'
++ bc -l
+ yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.d3W23Cx3SZ/statefulset_monitoring-rs0.yml
+ yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.d3W23Cx3SZ/statefulset_monitoring-rs0.yml
+ diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml /tmp/tmp.d3W23Cx3SZ/statefulset_monitoring-rs0.yml
+ compare_kubectl service/monitoring-rs0
+ compare_kubectl service/monitoring-mongos
+ compare_kubectl statefulset/monitoring-cfg
+ compare_kubectl statefulset/monitoring-mongos
(each of the four runs above applies the identical yq normalization and diff; every diff is empty, so all five generated objects match their expected results)
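The comparison pattern throughout this step is: dump the live object, strip everything cluster- or run-specific with yq, then diff against a golden file checked into the repo. A minimal sketch of the same idea (the filter here is abbreviated; the full filter is shown above):

    compare_object() {
        local resource=$1              # e.g. statefulset/monitoring-rs0
        local expected=$2              # golden YAML from e2e-tests/.../compare
        local actual
        actual=$(mktemp)

        # Normalize: drop server-populated and environment-specific fields,
        # and replace the test namespace with a stable placeholder.
        kubectl get -o yaml "$resource" | yq eval '
            del(.metadata.managedFields) |
            del(.metadata.resourceVersion) |
            del(.metadata.uid) |
            del(.status) |
            (.. | select(tag == "!!str")) |= sub("monitoring-2-0-7455", "NAME_SPACE")
          ' - > "$actual"

        diff -u "$expected" "$actual"  # empty output means the object matches
    }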
+ desc 'check mongod metrics'
-----------------------------------------------------------------------------------
check mongod metrics
-----------------------------------------------------------------------------------
+ get_metric_values node_boot_time_seconds monitoring-2-0-7455-monitoring-rs0-1 admin:admin
++ /usr/bin/date -u +%s -d '-1 minute'
+ local start=1720511909
++ /usr/bin/date -u +%s
+ local end=1720511969
++ get_service_endpoint monitoring-service
+ local endpoint=34.44.143.255
+ curl -s -k 'https://admin:admin@34.44.143.255/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-7455-monitoring-rs0-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-7455-monitoring-rs0-1%22%7D%29&start=1720511909&end=1720511969&step=60'
+ jq '.data.result[0].values[][1]'
+ grep '^"[0-9]'
"1720509968"
"1720509968"
+ get_metric_values mongodb_connections monitoring-2-0-7455-monitoring-rs0-1 admin:admin
+ curl -s -k 'https://admin:admin@34.44.143.255/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-7455-monitoring-rs0-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-7455-monitoring-rs0-1%22%7D%29&start=1720511911&end=1720511971&step=60'
"0"
"0"
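get_service_endpoint, called before every one of these queries, prefers the LoadBalancer hostname and falls back to its IP; on this cluster the hostname is the string "null", so the IP 34.44.143.255 wins every time. Reconstructed from the trace as a sketch:

    get_service_endpoint() {
        local service=$1
        local hostname ip

        # Prefer a LoadBalancer hostname (set by some cloud providers)...
        hostname=$(kubectl get "service/$service" -o json \
            | jq -r '.status.loadBalancer.ingress[].hostname')
        if [ -n "$hostname" ] && [ "$hostname" != "null" ]; then
            echo "$hostname"
            return
        fi

        # ...otherwise fall back to the ingress IP (populated here).
        ip=$(kubectl get "service/$service" -o json \
            | jq -r '.status.loadBalancer.ingress[].ip')
        echo "$ip"
    }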
+ desc 'check mongo config metrics'
-----------------------------------------------------------------------------------
check mongo config metrics
-----------------------------------------------------------------------------------
+ get_metric_values node_boot_time_seconds monitoring-2-0-7455-monitoring-cfg-1 admin:admin
"1720509967"
"1720509967"
+ get_metric_values mongodb_connections monitoring-2-0-7455-monitoring-cfg-1 admin:admin
"0"
"0"
+ desc 'check mongos metrics'
-----------------------------------------------------------------------------------
check mongos metrics
-----------------------------------------------------------------------------------
++ kubectl get pod -l app.kubernetes.io/component=mongos -o 'jsonpath={.items[0].metadata.name}'
+ MONGOS_POD_NAME=monitoring-mongos-0
+ get_metric_values node_boot_time_seconds monitoring-2-0-7455-monitoring-mongos-0 admin:admin
"1720509968"
"1720509968"
+ sleep 90
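All of the metric checks above go through get_metric_values, which queries the metrics database behind Grafana datasource 1 via the datasource proxy and asserts that numeric samples came back for the last minute. A sketch under the same assumptions (endpoint and credentials as in this run):

    get_metric_values() {
        local metric=$1 instance=$2 user_pass=$3
        local start end endpoint
        start=$(date -u +%s -d '-1 minute')   # look at the last 60 seconds
        end=$(date -u +%s)
        endpoint=34.44.143.255                # get_service_endpoint monitoring-service

        # query_range through Grafana's datasource proxy; -k because the
        # PMM certificate is self-signed.
        curl -s -k "https://${user_pass}@${endpoint}/graph/api/datasources/proxy/1/api/v1/query_range" \
            --data-urlencode "query=min(${metric}{node_name=~\"${instance}\"})" \
            --data-urlencode "start=${start}" \
            --data-urlencode "end=${end}" \
            --data-urlencode "step=60" \
            | jq '.data.result[0].values[][1]' \
            | grep '^"[0-9]'                  # fail unless numeric samples exist
    }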
+ desc 'check QAN data'
-----------------------------------------------------------------------------------
check QAN data
-----------------------------------------------------------------------------------
+ get_qan_values mongodb dev-mongod admin:admin
++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z
+ start=2024-07-08T20:01:06+00:00
+ end=2024-07-09T08:01:06+00:00
++ get_service_endpoint monitoring-service
+ endpoint=34.44.143.255
+ cat
++ curl -s -k -XPOST -d @payload.json https://admin:admin@34.44.143.255/v0/qan/GetReport
++ jq '.rows[].fingerprint'
+ response='"TOTAL" "FIND version" "DBSTATS clusterTime,dbStats,hash,id,keyId,lsid,mode,scale,signature" "FIND system.version _id" "FIND oplog.rs"'
+ rm -f payload.json
+ get_qan_values mongodb dev-mongos admin:admin
+ start=2024-07-08T20:01:08+00:00
+ end=2024-07-09T08:01:08+00:00
++ curl -s -k -XPOST -d @payload.json https://admin:admin@34.44.143.255/v0/qan/GetReport
++ jq '.rows[].fingerprint'
+ response='"TOTAL" "DBSTATS application,architecture,client,clusterTime,db,dbStats,driver,hash,host,id,keyId,level,lsid,mayBypassWriteBlocking,mongos,name,os,platform,provenance,readConcern,role,scale,signature,type,uid,user,version"'
+ rm -f payload.json
(both environments return query fingerprints rather than null, so the QAN checks pass)
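get_qan_values builds a JSON request for PMM's Query Analytics API and asserts the report is non-empty. The payload is written by a bare "cat" heredoc whose body the trace does not show, so the fields below are a plausible reconstruction, not the suite's actual payload (period bounds and the environment label filter are assumptions):

    get_qan_report() {
        local environment=$1 user_pass=$2 endpoint=$3
        local start end
        start=$(date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z)
        end=$(date -u +%Y-%m-%dT%H:%M:%S%:z)

        # Hypothetical payload; the real heredoc is elided in the log ("+ cat").
        cat > payload.json <<EOF
    {
        "period_start_from": "$start",
        "period_start_to": "$end",
        "group_by": "queryid",
        "labels": [{"key": "environment", "value": ["$environment"]}]
    }
    EOF

        curl -s -k -XPOST -d @payload.json \
            "https://${user_pass}@${endpoint}/v0/qan/GetReport" \
            | jq '.rows[].fingerprint'
        rm -f payload.json
    }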
+ nodeList=($(get_node_id_from_pmm))
++ get_node_id_from_pmm
+++ kubectl get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name
++ for instance in monitoring-cfg-0 monitoring-cfg-1 monitoring-cfg-2 monitoring-mongos-0 monitoring-mongos-1 monitoring-mongos-2 monitoring-rs0-0 monitoring-rs0-1 monitoring-rs0-2
+++ kubectl exec -n monitoring-2-0-7455 $instance -c pmm-client -- pmm-admin status --json
+++ jq -r .pmm_agent_status.node_id
++ echo /node_id/d6e0d994-1129-43ab-8870-c2c75d24d930 /node_id/5c6ddbde-427a-479f-857c-567d76d78008 /node_id/64c79d73-4a13-4b0d-a5b7-7ae5c38ba9d3 /node_id/ca782472-157c-4be6-ac5e-92b9ba1f9d0f /node_id/4d261403-0df2-446f-8513-4fdf3f2e19d4 /node_id/33afe8ef-c736-4e70-9fba-68e7d5b15d4d /node_id/d39de1be-67bd-4348-9a41-0894c84f546c /node_id/e942f811-bfd1-4c68-a7bf-8daba7eae1d3 /node_id/1ae4dd99-a987-4b6c-bff0-b375d7db5edb
+ nodeList_from_pmm=($(does_node_id_exists "${nodeList[@]}"))
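Collecting the node IDs is a loop over the pmm-client sidecars; each sidecar registers itself with PMM server under a unique /node_id/&lt;uuid&gt;. A sketch of the pattern reconstructed from the trace:

    get_node_id_from_pmm() {
        local -a nodeList=()
        local instance
        # One pmm-client container runs in every PSMDB pod.
        for instance in $(kubectl get pods --no-headers \
                -l app.kubernetes.io/name=percona-server-mongodb \
                --output=custom-columns=NAME:.metadata.name); do
            nodeList+=("$(kubectl exec "$instance" -c pmm-client -- \
                pmm-admin status --json | jq -r '.pmm_agent_status.node_id')")
        done
        echo "${nodeList[@]}"
    }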
++ does_node_id_exists /node_id/d6e0d994-1129-43ab-8870-c2c75d24d930 /node_id/5c6ddbde-427a-479f-857c-567d76d78008 /node_id/64c79d73-4a13-4b0d-a5b7-7ae5c38ba9d3 /node_id/ca782472-157c-4be6-ac5e-92b9ba1f9d0f /node_id/4d261403-0df2-446f-8513-4fdf3f2e19d4 /node_id/33afe8ef-c736-4e70-9fba-68e7d5b15d4d /node_id/d39de1be-67bd-4348-9a41-0894c84f546c /node_id/e942f811-bfd1-4c68-a7bf-8daba7eae1d3 /node_id/1ae4dd99-a987-4b6c-bff0-b375d7db5edb
++ for node_id in "${nodeList[@]}"
++++ get_service_ip monitoring-service
+++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}'
Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found
(the lookup is retried with 0s/4s/8s backoff and fails all three times: get_service_ip asks for a PSMDB custom resource named monitoring-service, but the custom resource in this test is named monitoring, so the helper returns 1 and falls back to a service DNS name)
++++ echo -n monitoring-service.monitoring-service-rs0
+++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE
command terminated with exit code 1
(also retried three times; every pmm-admin inventory call against the fallback URL exits 1, so no inventory entry is matched for /node_id/d6e0d994-1129-43ab-8870-c2c75d24d930)
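The verification helper that is failing here pairs each collected node id with a lookup in PMM server's inventory; reconstructed from the trace as a sketch (monitoring-0 is the PMM server pod, and the sketch depends on get_service_ip, which is exactly the call that breaks above):

    does_node_id_exists() {
        local -a nodeList=("$@") nodeList_from_pmm=()
        local node_id
        for node_id in "${nodeList[@]}"; do
            # Ask PMM server's inventory whether this node is registered.
            nodeList_from_pmm+=("$(kubectl exec monitoring-0 -- pmm-admin \
                --server-url="https://admin:admin@$(get_service_ip monitoring-service)/" \
                --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE \
                | grep "$node_id" | awk '{print $4}')")
        done
        echo "${nodeList_from_pmm[@]}"
    }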
++ for node_id in "${nodeList[@]}"
(the identical NotFound/exit-1 sequence repeats for /node_id/5c6ddbde-427a-479f-857c-567d76d78008, /node_id/64c79d73-4a13-4b0d-a5b7-7ae5c38ba9d3 and /node_id/ca782472-157c-4be6-ac5e-92b9ba1f9d0f: three failed psmdb/monitoring-service lookups, then three failed pmm-admin inventory calls each)
++ for node_id in "${nodeList[@]}"
+++ grep /node_id/4d261403-0df2-446f-8513-4fdf3f2e19d4
+++ awk '{print $4}'
++++ get_service_ip monitoring-service
+++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}'
Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found
+++++ sleep 0
+++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}'
Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found
+++++ sleep 4
+++++ for i in '$(seq 0 2)'
+++++ set +e
+++++ kubectl get
psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.GUf2txdPfc +++++ cat /tmp/tmp.hSIgLP93Fj Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.GUf2txdPfc +++++ cat /tmp/tmp.hSIgLP93Fj Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.GUf2txdPfc /tmp/tmp.hSIgLP93Fj +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.7JfMTQIsof ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3B1Ztb46Zk +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.7JfMTQIsof +++ cat /tmp/tmp.3B1Ztb46Zk command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.7JfMTQIsof +++ cat /tmp/tmp.3B1Ztb46Zk command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.7JfMTQIsof +++ cat /tmp/tmp.3B1Ztb46Zk command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.7JfMTQIsof +++ cat /tmp/tmp.3B1Ztb46Zk command terminated with exit code 1 +++ rm /tmp/tmp.7JfMTQIsof /tmp/tmp.3B1Ztb46Zk +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/33afe8ef-c736-4e70-9fba-68e7d5b15d4d +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.J8FvKub6cx ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.qjfHnej2zb +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.J8FvKub6cx +++++ cat /tmp/tmp.qjfHnej2zb Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e 
+++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.J8FvKub6cx +++++ cat /tmp/tmp.qjfHnej2zb Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.J8FvKub6cx +++++ cat /tmp/tmp.qjfHnej2zb Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.J8FvKub6cx +++++ cat /tmp/tmp.qjfHnej2zb Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.J8FvKub6cx /tmp/tmp.qjfHnej2zb +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Ir4nynWECs ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Px3vTABprP +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Ir4nynWECs +++ cat /tmp/tmp.Px3vTABprP command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Ir4nynWECs +++ cat /tmp/tmp.Px3vTABprP command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Ir4nynWECs +++ cat /tmp/tmp.Px3vTABprP command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.Ir4nynWECs +++ cat /tmp/tmp.Px3vTABprP command terminated with exit code 1 +++ rm /tmp/tmp.Ir4nynWECs /tmp/tmp.Px3vTABprP +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/d39de1be-67bd-4348-9a41-0894c84f546c +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.Ragv6NcxB8 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.viOQStIT9K +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 
2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.Ragv6NcxB8 +++++ cat /tmp/tmp.viOQStIT9K Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.Ragv6NcxB8 +++++ cat /tmp/tmp.viOQStIT9K Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.Ragv6NcxB8 +++++ cat /tmp/tmp.viOQStIT9K Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.Ragv6NcxB8 +++++ cat /tmp/tmp.viOQStIT9K Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.Ragv6NcxB8 /tmp/tmp.viOQStIT9K +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.EvTRKjYEeA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.egmVOWtpXV +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.EvTRKjYEeA +++ cat /tmp/tmp.egmVOWtpXV command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.EvTRKjYEeA +++ cat /tmp/tmp.egmVOWtpXV command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.EvTRKjYEeA +++ cat /tmp/tmp.egmVOWtpXV command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.EvTRKjYEeA +++ cat /tmp/tmp.egmVOWtpXV command terminated with exit code 1 +++ rm /tmp/tmp.EvTRKjYEeA /tmp/tmp.egmVOWtpXV +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/e942f811-bfd1-4c68-a7bf-8daba7eae1d3 +++ awk '{print $4}' ++++ 
get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.jHtAnU8R94 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.NrDVRncxfa +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.jHtAnU8R94 +++++ cat /tmp/tmp.NrDVRncxfa Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.jHtAnU8R94 +++++ cat /tmp/tmp.NrDVRncxfa Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.jHtAnU8R94 +++++ cat /tmp/tmp.NrDVRncxfa Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.jHtAnU8R94 +++++ cat /tmp/tmp.NrDVRncxfa Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.jHtAnU8R94 /tmp/tmp.NrDVRncxfa +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.n0IRBxV3LV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.32iQi48Vil +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.n0IRBxV3LV +++ cat /tmp/tmp.32iQi48Vil command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.n0IRBxV3LV +++ cat /tmp/tmp.32iQi48Vil command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.n0IRBxV3LV +++ cat /tmp/tmp.32iQi48Vil command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.n0IRBxV3LV +++ cat /tmp/tmp.32iQi48Vil command terminated with exit code 1 +++ rm /tmp/tmp.n0IRBxV3LV /tmp/tmp.32iQi48Vil +++ 
return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/1ae4dd99-a987-4b6c-bff0-b375d7db5edb +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.E1dwm2Zw9f ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.F8XuikyAhM +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.E1dwm2Zw9f +++++ cat /tmp/tmp.F8XuikyAhM Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.E1dwm2Zw9f +++++ cat /tmp/tmp.F8XuikyAhM Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.E1dwm2Zw9f +++++ cat /tmp/tmp.F8XuikyAhM Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.E1dwm2Zw9f +++++ cat /tmp/tmp.F8XuikyAhM Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.E1dwm2Zw9f /tmp/tmp.F8XuikyAhM +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3wWROalSHZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.sjQdGAdGqa +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.3wWROalSHZ +++ cat /tmp/tmp.sjQdGAdGqa command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.3wWROalSHZ +++ cat /tmp/tmp.sjQdGAdGqa command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin 
--server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE
+++ exit_status=1
+++ set -e
+++ '[' 1 '!=' 0 -a -n 1 ']'
+++ cat /tmp/tmp.3wWROalSHZ
+++ cat /tmp/tmp.sjQdGAdGqa
command terminated with exit code 1
+++ sleep 8
+++ cat /tmp/tmp.3wWROalSHZ
+++ cat /tmp/tmp.sjQdGAdGqa
command terminated with exit code 1
+++ rm /tmp/tmp.3wWROalSHZ /tmp/tmp.sjQdGAdGqa
+++ return 1
++ echo
+ kubectl_bin patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]'
++ mktemp
+ local LAST_OUT=/tmp/tmp.ucHHMzzahV
++ mktemp
+ local LAST_ERR=/tmp/tmp.pGPavaeCdP
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ucHHMzzahV
perconaservermongodb.psmdb.percona.com/monitoring patched
+ cat /tmp/tmp.pGPavaeCdP
+ rm /tmp/tmp.ucHHMzzahV /tmp/tmp.pGPavaeCdP
+ return 0
+ wait_for_delete pod/monitoring-mongos-0
+ local res=pod/monitoring-mongos-0
+ local wait_time=60
+ set +o xtrace
pod/monitoring-mongos-0 - ......................................Error from server (NotFound): pods "monitoring-mongos-0" not found
Error from server (NotFound): pods "monitoring-mongos-0" not found
Error from server (NotFound): pods "monitoring-mongos-0" not found
Error from server (NotFound): pods "monitoring-mongos-0" not found
+ wait_for_delete pod/monitoring-rs0-0
+ local res=pod/monitoring-rs0-0
+ local wait_time=60
+ set +o xtrace
pod/monitoring-rs0-0 - .........Error from server (NotFound): pods "monitoring-rs0-0" not found
Error from server (NotFound): pods "monitoring-rs0-0" not found
Error from server (NotFound): pods "monitoring-rs0-0" not found
Error from server (NotFound): pods "monitoring-rs0-0" not found
+ wait_for_delete pod/monitoring-cfg-0
+ local res=pod/monitoring-cfg-0
+ local wait_time=60
+ set +o xtrace
pod/monitoring-cfg-0 - .........Error from server (NotFound): pods "monitoring-cfg-0" not found
Error from server (NotFound): pods "monitoring-cfg-0" not found
Error from server (NotFound): pods "monitoring-cfg-0" not found
Error from server (NotFound): pods "monitoring-cfg-0" not found
+ desc 'check if services are not deleted'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if services are not deleted
-----------------------------------------------------------------------------------
+ kubectl_bin get svc monitoring-rs0
++ mktemp
+ local LAST_OUT=/tmp/tmp.loCxdNOzDS
++ mktemp
+ local LAST_ERR=/tmp/tmp.GIs6eVOZ4Z
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get svc monitoring-rs0
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.loCxdNOzDS
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
monitoring-rs0 ClusterIP None 27017/TCP 16m
+ cat /tmp/tmp.GIs6eVOZ4Z
+ rm /tmp/tmp.loCxdNOzDS /tmp/tmp.GIs6eVOZ4Z
+ return 0
+ kubectl_bin get svc monitoring-cfg
++ mktemp
+ local LAST_OUT=/tmp/tmp.zyyHXCn7mA
++ mktemp
+ local LAST_ERR=/tmp/tmp.uSQ8Wem3MY
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get svc monitoring-cfg
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.zyyHXCn7mA
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
monitoring-cfg ClusterIP None 27017/TCP 16m
+ cat /tmp/tmp.uSQ8Wem3MY
+ rm /tmp/tmp.zyyHXCn7mA /tmp/tmp.uSQ8Wem3MY
+ return 0
+ kubectl_bin get svc monitoring-mongos
++ mktemp
+ local LAST_OUT=/tmp/tmp.weCd5oj47s
++ mktemp
+ local LAST_ERR=/tmp/tmp.iaYW2ij0LO
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get svc monitoring-mongos
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.weCd5oj47s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
monitoring-mongos ClusterIP 10.122.104.205 27017/TCP 14m
+ cat /tmp/tmp.iaYW2ij0LO
+ rm /tmp/tmp.weCd5oj47s /tmp/tmp.iaYW2ij0LO
+ return 0
+ does_node_id_exists_in_pmm=($(does_node_id_exists "${nodeList[@]}"))
++ does_node_id_exists /node_id/d6e0d994-1129-43ab-8870-c2c75d24d930 /node_id/5c6ddbde-427a-479f-857c-567d76d78008 /node_id/64c79d73-4a13-4b0d-a5b7-7ae5c38ba9d3 /node_id/ca782472-157c-4be6-ac5e-92b9ba1f9d0f /node_id/4d261403-0df2-446f-8513-4fdf3f2e19d4 /node_id/33afe8ef-c736-4e70-9fba-68e7d5b15d4d /node_id/d39de1be-67bd-4348-9a41-0894c84f546c /node_id/e942f811-bfd1-4c68-a7bf-8daba7eae1d3 /node_id/1ae4dd99-a987-4b6c-bff0-b375d7db5edb
++ nodeList=("$@")
++ local -a nodeList
++ nodeList_from_pmm=()
++ local -a nodeList_from_pmm
++ for node_id in '"${nodeList[@]}"'
++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}'))
+++ grep /node_id/d6e0d994-1129-43ab-8870-c2c75d24d930
+++ awk '{print $4}'
++++ get_service_ip monitoring-service
++++ local service=monitoring-service
++++ local server_type=rs0
+++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}'
++++++ mktemp
+++++ local LAST_OUT=/tmp/tmp.MStc1TuXhf
++++++ mktemp
+++++ local LAST_ERR=/tmp/tmp.Kzbj7QSKzZ
+++++ local exit_status=0
+++++ local timeout=4
++++++ seq 0 2
+++++ for i in '$(seq 0 2)'
+++++ set +e
+++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}'
+++++ exit_status=1
+++++ set -e
+++++ '[' 1 '!=' 0 -a -n 1 ']'
+++++ cat /tmp/tmp.MStc1TuXhf
+++++ cat /tmp/tmp.Kzbj7QSKzZ
Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found
+++++ sleep 0
+++++ for i in '$(seq 0 2)'
+++++ set +e
+++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}'
+++++ exit_status=1
+++++ set -e
+++++ '[' 1 '!=' 0 -a -n 1 ']'
+++++ cat /tmp/tmp.MStc1TuXhf
+++++ cat /tmp/tmp.Kzbj7QSKzZ
Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found
+++++ sleep 4
+++++ for i in '$(seq 0 2)'
+++++ set +e
+++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}'
+++++ exit_status=1
+++++ set -e
+++++ '[' 1 '!=' 0 -a -n 1 ']'
+++++ cat /tmp/tmp.MStc1TuXhf
+++++ cat /tmp/tmp.Kzbj7QSKzZ
Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found
+++++ sleep 8
+++++ cat /tmp/tmp.MStc1TuXhf
+++++ cat /tmp/tmp.Kzbj7QSKzZ
Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found
+++++ rm /tmp/tmp.MStc1TuXhf /tmp/tmp.Kzbj7QSKzZ
+++++ return 1
++++ '[' '' '!=' true ']'
++++ echo -n monitoring-service.monitoring-service-rs0
++++ return
+++ kubectl_bin exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE
++++ mktemp +++ local LAST_OUT=/tmp/tmp.ULm5foCtL3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UU8th6e6NI +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.ULm5foCtL3 +++ cat /tmp/tmp.UU8th6e6NI command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.ULm5foCtL3 +++ cat /tmp/tmp.UU8th6e6NI command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.ULm5foCtL3 +++ cat /tmp/tmp.UU8th6e6NI command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.ULm5foCtL3 +++ cat /tmp/tmp.UU8th6e6NI command terminated with exit code 1 +++ rm /tmp/tmp.ULm5foCtL3 /tmp/tmp.UU8th6e6NI +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/5c6ddbde-427a-479f-857c-567d76d78008 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.dEr96ccTEK ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.8VigWFf0XD +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.dEr96ccTEK +++++ cat /tmp/tmp.8VigWFf0XD Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.dEr96ccTEK +++++ cat /tmp/tmp.8VigWFf0XD Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.dEr96ccTEK +++++ cat /tmp/tmp.8VigWFf0XD Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.dEr96ccTEK +++++ cat /tmp/tmp.8VigWFf0XD Error from server (NotFound): perconaservermongodbs.psmdb.percona.com 
"monitoring-service" not found +++++ rm /tmp/tmp.dEr96ccTEK /tmp/tmp.8VigWFf0XD +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.mAfkqWR9dm ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UDNqKQFTjQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.mAfkqWR9dm +++ cat /tmp/tmp.UDNqKQFTjQ command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.mAfkqWR9dm +++ cat /tmp/tmp.UDNqKQFTjQ command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.mAfkqWR9dm +++ cat /tmp/tmp.UDNqKQFTjQ command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.mAfkqWR9dm +++ cat /tmp/tmp.UDNqKQFTjQ command terminated with exit code 1 +++ rm /tmp/tmp.mAfkqWR9dm /tmp/tmp.UDNqKQFTjQ +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/64c79d73-4a13-4b0d-a5b7-7ae5c38ba9d3 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.stmyipJe0j ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.LM6nfqqtLb +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.stmyipJe0j +++++ cat /tmp/tmp.LM6nfqqtLb Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.stmyipJe0j +++++ cat /tmp/tmp.LM6nfqqtLb Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 
'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.stmyipJe0j +++++ cat /tmp/tmp.LM6nfqqtLb Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.stmyipJe0j +++++ cat /tmp/tmp.LM6nfqqtLb Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.stmyipJe0j /tmp/tmp.LM6nfqqtLb +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jOIVIYwvWZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gqChLSJRIj +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.jOIVIYwvWZ +++ cat /tmp/tmp.gqChLSJRIj command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.jOIVIYwvWZ +++ cat /tmp/tmp.gqChLSJRIj command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.jOIVIYwvWZ +++ cat /tmp/tmp.gqChLSJRIj command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.jOIVIYwvWZ +++ cat /tmp/tmp.gqChLSJRIj command terminated with exit code 1 +++ rm /tmp/tmp.jOIVIYwvWZ /tmp/tmp.gqChLSJRIj +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/ca782472-157c-4be6-ac5e-92b9ba1f9d0f +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.m1DyCuOFhP ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.mazTZmcCkA +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.m1DyCuOFhP +++++ cat /tmp/tmp.mazTZmcCkA Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get 
psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.m1DyCuOFhP +++++ cat /tmp/tmp.mazTZmcCkA Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.m1DyCuOFhP +++++ cat /tmp/tmp.mazTZmcCkA Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.m1DyCuOFhP +++++ cat /tmp/tmp.mazTZmcCkA Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.m1DyCuOFhP /tmp/tmp.mazTZmcCkA +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Pimwtr3FIN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.kifZLg4DMf +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Pimwtr3FIN +++ cat /tmp/tmp.kifZLg4DMf command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Pimwtr3FIN +++ cat /tmp/tmp.kifZLg4DMf command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Pimwtr3FIN +++ cat /tmp/tmp.kifZLg4DMf command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.Pimwtr3FIN +++ cat /tmp/tmp.kifZLg4DMf command terminated with exit code 1 +++ rm /tmp/tmp.Pimwtr3FIN /tmp/tmp.kifZLg4DMf +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/4d261403-0df2-446f-8513-4fdf3f2e19d4 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.RwEr8trZhR ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.lXP1vejJld +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e 
+++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.RwEr8trZhR +++++ cat /tmp/tmp.lXP1vejJld Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.RwEr8trZhR +++++ cat /tmp/tmp.lXP1vejJld Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.RwEr8trZhR +++++ cat /tmp/tmp.lXP1vejJld Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.RwEr8trZhR +++++ cat /tmp/tmp.lXP1vejJld Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.RwEr8trZhR /tmp/tmp.lXP1vejJld +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.O5PDNA9ZMI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5cWcaUQjOZ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.O5PDNA9ZMI +++ cat /tmp/tmp.5cWcaUQjOZ command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.O5PDNA9ZMI +++ cat /tmp/tmp.5cWcaUQjOZ command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.O5PDNA9ZMI +++ cat /tmp/tmp.5cWcaUQjOZ command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.O5PDNA9ZMI +++ cat /tmp/tmp.5cWcaUQjOZ command terminated with exit code 1 +++ rm /tmp/tmp.O5PDNA9ZMI /tmp/tmp.5cWcaUQjOZ +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/33afe8ef-c736-4e70-9fba-68e7d5b15d4d +++ awk '{print $4}' ++++ get_service_ip 
monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.s93Do8M0lG ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.SHu02u8idL +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.s93Do8M0lG +++++ cat /tmp/tmp.SHu02u8idL Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.s93Do8M0lG +++++ cat /tmp/tmp.SHu02u8idL Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.s93Do8M0lG +++++ cat /tmp/tmp.SHu02u8idL Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.s93Do8M0lG +++++ cat /tmp/tmp.SHu02u8idL Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.s93Do8M0lG /tmp/tmp.SHu02u8idL +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.u8hJDdIhuo ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JS65l09IVu +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.u8hJDdIhuo +++ cat /tmp/tmp.JS65l09IVu command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.u8hJDdIhuo +++ cat /tmp/tmp.JS65l09IVu command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.u8hJDdIhuo +++ cat /tmp/tmp.JS65l09IVu command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.u8hJDdIhuo +++ cat /tmp/tmp.JS65l09IVu command terminated with exit code 1 +++ rm /tmp/tmp.u8hJDdIhuo /tmp/tmp.JS65l09IVu +++ return 1 ++ for 
++ for node_id in '"${nodeList[@]}"'
++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}'))
+++ grep /node_id/d39de1be-67bd-4348-9a41-0894c84f546c
+++ awk '{print $4}'
++++ get_service_ip monitoring-service
++++ local service=monitoring-service
++++ local server_type=rs0
+++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}'
Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found
[kubectl_bin retried three times (sleep 0, 4, 8); identical NotFound error on each attempt, return 1 -- the psmdb custom resource was deleted earlier]
++++ '[' '' '!=' true ']'
++++ echo -n monitoring-service.monitoring-service-rs0
++++ return
+++ kubectl_bin exec -n monitoring-2-0-7455 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE
command terminated with exit code 1
[retried three times (sleep 0, 4, 8); the pmm-admin exec failed with exit code 1 on each attempt, return 1, so nothing was appended for this node]
++ for node_id in '"${nodeList[@]}"'
[identical sequence for /node_id/e942f811-bfd1-4c68-a7bf-8daba7eae1d3: get_service_ip fails with the same NotFound, falls back to monitoring-service.monitoring-service-rs0, and the pmm-admin exec fails three times with exit code 1]
++ for node_id in '"${nodeList[@]}"'
[identical sequence for /node_id/1ae4dd99-a987-4b6c-bff0-b375d7db5edb]
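-----------------------------------------------------------------------------------
note: the kubectl_bin retry wrapper
-----------------------------------------------------------------------------------
Every kubectl call in this trace goes through a kubectl_bin wrapper that captures stdout/stderr into mktemp files and retries up to three times with a growing back-off (sleep 0, then 4, then 8 seconds). A minimal sketch reconstructed from the trace; variable names match what the trace prints, but the exact retry guard is only partially visible ('[' $exit_status '!=' 0 -a -n 1 ']'), so treat this as an approximation:

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" = 0 ]; then
                break
            fi
            cat "$LAST_OUT"
            cat "$LAST_ERR" >&2
            sleep $((timeout * i))    # 0s, 4s, 8s between attempts
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }

This is why every failing command in this section appears three times before its caller sees a non-zero return.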
++ echo
+ [[ -n '' ]]
++ kubectl_bin logs monitoring-rs0-0 pmm-client
++ grep -c 'cannot auto discover databases and collections'
error: error from server (NotFound): pods "monitoring-rs0-0" not found in namespace "monitoring-2-0-7455"
[retried three times (sleep 0, 4, 8); identical NotFound error on each attempt, return 1 -- the replset pods are already gone]
+ [[ 0 != 0 ]]
+ desc 'check for passwords leak'
+ set +o xtrace
-----------------------------------------------------------------------------------
check for passwords leak
-----------------------------------------------------------------------------------
+ check_passwords_leak
+ local secrets
+ local passwords
+ local pods
++ kubectl_bin get secrets -o json
++ jq -r '.items[].data | to_entries | .[] | select(.key | (contains("_PASSWORD"))) | .value'
++ return 0
+ secrets='YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2'
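-----------------------------------------------------------------------------------
note: how the password list is built
-----------------------------------------------------------------------------------
The jq filter above selects every Secret data value whose key contains "_PASSWORD"; the loop that follows decodes each value and keeps the raw base64 strings as well, so the sweep can catch a credential logged either in plaintext or still encoded. A sketch assembled from the commands visible in the trace (the surrounding function body is an assumption):

    secrets=$(kubectl get secrets -o json \
        | jq -r '.items[].data | to_entries | .[] | select(.key | (contains("_PASSWORD"))) | .value')
    # decode each value (base64 -d emits no trailing newline, hence the echo),
    # then append the raw base64 forms to the same list
    passwords="$(for i in $secrets; do echo "$i" | base64 -d; echo; done) $secrets"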
+ echo secrets=YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2
[echo output repeats the same list]
++ for i in '$secrets'
++ base64 -d
++ echo
[the decode loop repeats for each of the 10 values]
+ passwords='backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2'
+ echo passwords=backup123456 clusterAdmin123456 ...
[echo output repeats the same list: 10 plaintext values followed by their 10 base64 forms]
++ kubectl_bin get pods -o name
++ awk -F / '{print $2}'
++ return 0
+ pods='monitoring-0 psmdb-client-6cd48df8b6-sh52w'
+ echo pods=monitoring-0 psmdb-client-6cd48df8b6-sh52w
pods=monitoring-0 psmdb-client-6cd48df8b6-sh52w
+ collect_logs monitoring-2-0-7455
+ local containers
+ local count
+ NS=monitoring-2-0-7455
+ for p in '$pods'
++ kubectl_bin -n monitoring-2-0-7455 get pod monitoring-0 -o 'jsonpath={.spec.containers[*].name}'
++ return 0
+ containers=monitoring
+ for c in '$containers'
+ [[ monitoring =~ pmm ]]
+ kubectl_bin -n monitoring-2-0-7455 logs monitoring-0 -c monitoring
+ return 0
+ echo logs saved in: /tmp/tmp.d3W23Cx3SZ/logs_output-monitoring-0-monitoring.txt
logs saved in: /tmp/tmp.d3W23Cx3SZ/logs_output-monitoring-0-monitoring.txt
+ for pass in '$passwords'
++ grep -c --fixed-strings -- backup123456 /tmp/tmp.d3W23Cx3SZ/logs_output-monitoring-0-monitoring.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
[the same grep/count=0/test sequence repeats for all 20 entries of $passwords; no match in the monitoring container log]
+ echo
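-----------------------------------------------------------------------------------
note: the per-password grep
-----------------------------------------------------------------------------------
The sweep above runs 'grep -c --fixed-strings -- "$pass" <logfile>' once per entry. The flags matter: --fixed-strings stops base64 characters such as '+', '/' and '=' from being treated as a regex, and '--' stops a credential that happens to begin with a dash from being parsed as an option. The bare ':' traced after each grep is an '|| :' fallback: grep -c prints 0 but exits 1 when nothing matches, which would abort the test under 'set -e'. One iteration, sketched (the failure branch is assumed; this run never reaches it):

    count=$(grep -c --fixed-strings -- "$pass" "$log_file" || :)
    if [[ $count != 0 ]]; then
        echo "password '$pass' found in $log_file"
        exit 1
    fi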
+ for p in '$pods'
++ kubectl_bin -n monitoring-2-0-7455 get pod psmdb-client-6cd48df8b6-sh52w -o 'jsonpath={.spec.containers[*].name}'
++ return 0
+ containers=psmdb-client
+ for c in '$containers'
+ [[ psmdb-client =~ pmm ]]
+ kubectl_bin -n monitoring-2-0-7455 logs psmdb-client-6cd48df8b6-sh52w -c psmdb-client
+ return 0
+ echo logs saved in: /tmp/tmp.d3W23Cx3SZ/logs_output-psmdb-client-6cd48df8b6-sh52w-psmdb-client.txt
logs saved in: /tmp/tmp.d3W23Cx3SZ/logs_output-psmdb-client-6cd48df8b6-sh52w-psmdb-client.txt
[the 20-password grep sweep repeats against the psmdb-client log; count=0 for every entry]
+ echo
+ '[' -n psmdb-operator ']'
++ kubectl_bin -n psmdb-operator get pods -o name
++ awk -F / '{print $2}'
++ return 0
+ pods=percona-server-mongodb-operator-6cfcdf54dd-zx7s4
+ collect_logs psmdb-operator
+ local containers
+ local count
+ NS=psmdb-operator
+ for p in '$pods'
++ kubectl_bin -n psmdb-operator get pod percona-server-mongodb-operator-6cfcdf54dd-zx7s4 -o 'jsonpath={.spec.containers[*].name}'
++ return 0
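-----------------------------------------------------------------------------------
note: the shape of collect_logs
-----------------------------------------------------------------------------------
The same collection now repeats for the operator pod in the psmdb-operator namespace. For reference, the function's shape as it can be reconstructed from the trace: list each pod's containers via jsonpath, save every container log under the test's temp dir, and sweep it for all passwords. $tmp_dir and the pmm-skip branch are assumptions; the trace only shows the literal /tmp/tmp.d3W23Cx3SZ path and the '[[ $c =~ pmm ]]' test without its consequent.

    collect_logs() {
        local NS=$1
        local containers count
        for p in $pods; do
            containers=$(kubectl -n "$NS" get pod "$p" -o 'jsonpath={.spec.containers[*].name}')
            for c in $containers; do
                [[ $c =~ pmm ]] && continue   # assumed: skip pmm sidecar containers
                kubectl -n "$NS" logs "$p" -c "$c" >"$tmp_dir/logs_output-$p-$c.txt"
                echo "logs saved in: $tmp_dir/logs_output-$p-$c.txt"
                for pass in $passwords; do
                    count=$(grep -c --fixed-strings -- "$pass" "$tmp_dir/logs_output-$p-$c.txt" || :)
                    [[ $count != 0 ]] && echo "leak: '$pass' in $p/$c"
                done
            done
        done
    }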
+ containers=percona-server-mongodb-operator
+ for c in '$containers'
+ [[ percona-server-mongodb-operator =~ pmm ]]
+ kubectl_bin -n psmdb-operator logs percona-server-mongodb-operator-6cfcdf54dd-zx7s4 -c percona-server-mongodb-operator
+ return 0
+ echo logs saved in: /tmp/tmp.d3W23Cx3SZ/logs_output-percona-server-mongodb-operator-6cfcdf54dd-zx7s4-percona-server-mongodb-operator.txt
logs saved in: /tmp/tmp.d3W23Cx3SZ/logs_output-percona-server-mongodb-operator-6cfcdf54dd-zx7s4-percona-server-mongodb-operator.txt
[the 20-password grep sweep repeats against the operator log; count=0 for every entry -- no plaintext or base64 password appears in any collected log]
+ echo
+ helm uninstall monitoring
release "monitoring" uninstalled
+ destroy monitoring-2-0-7455
+ local namespace=monitoring-2-0-7455
+ local ignore_logs=true
+ desc 'destroy cluster/operator and all other resources'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
+ '[' true == false ']'
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/deploy/crd.yaml --ignore-not-found --wait=false
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ return 0
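-----------------------------------------------------------------------------------
note: clearing finalizers before waiting on CRD deletion
-----------------------------------------------------------------------------------
The per-CRD cleanup that follows strips finalizers from any leftover custom resources so that CRD deletion cannot hang, then waits for the CRD itself to disappear. The xargs trick maps the NAMESPACE and NAME columns of 'kubectl get ... -o wide' onto $0 and $1 of an inline sh script; it also explains the odd '-n sh' in the error lines below: with no input, xargs still runs the script once with no arguments, so $0 falls back to the shell's own name, "sh". Sketched from the trace (the '|| :' is inferred from the bare ':' that keeps set -e from aborting):

    kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide \
        | grep -v NAMESPACE \
        | xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' || :
    kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com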
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/deploy/crd.yaml
++ grep -v '\-\-\-'
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ return 0
[the same patch-finalizers / wait-for-delete sequence runs for perconaservermongodbrestores.psmdb.percona.com and perconaservermongodbs.psmdb.percona.com with the same "doesn't have a resource type" errors: the CRDs were already removed above]
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1567/deploy/cw-rbac.yaml --ignore-not-found
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ return 0
+ destroy_cert_manager
+ kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml
+ exit_status=1
namespace "cert-manager" deleted
[attempt 1 deletes the bulk of the manifest: the 6 cert-manager CRDs, 3 serviceaccounts, 13 clusterroles, 10 clusterrolebindings, 2 leaderelection roles and rolebindings, the cert-manager-cainjector deployment, and both webhook configurations]
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found
[plus five more NotFound errors for objects absent from this cluster: rolebinding cert-manager-webhook:dynamic-serving, services cert-manager and cert-manager-webhook, deployments cert-manager and cert-manager-webhook]
+ sleep 0
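-----------------------------------------------------------------------------------
note: why the cert-manager delete is retried twice more
-----------------------------------------------------------------------------------
kubectl delete without --ignore-not-found treats every already-deleted object as an error, so the first (mostly successful) pass still exits 1 and kubectl_bin replays the whole manifest twice against an empty cluster. The two retries below fail identically. A sketch of an idempotent variant, using the same flag this log already applies to crd.yaml and cw-rbac.yaml:

    kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml --ignore-not-found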
clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted deployment.apps "cert-manager-cainjector" deleted mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted + cat /tmp/tmp.FjrlmImpOB Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.gMEmD0bYyy namespace "cert-manager" deleted + cat /tmp/tmp.FjrlmImpOB Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.gMEmD0bYyy + cat /tmp/tmp.FjrlmImpOB Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": 
customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io 
"cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io 
"cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.gMEmD0bYyy + cat /tmp/tmp.FjrlmImpOB Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server 
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found
Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.gMEmD0bYyy /tmp/tmp.FjrlmImpOB + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace monitoring-2-0-7455 + rm -rf /tmp/tmp.d3W23Cx3SZ + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.mQJELpuKu7 + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.2sJ4mEXbgm ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.WyNbH5kEkH + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.2qCsTlDGJu + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + for i in '$(seq 0 2)' + set +e + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator + kubectl delete --grace-period=0 --force=true namespace monitoring-2-0-7455