Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/logs/monitoring-2-0.log
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
+ create_infra monitoring-2-0-3820
+ delete_crd
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/deploy/crd.yaml --ignore-not-found --wait=false
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/deploy/crd.yaml
++ grep -v '\-\-\-'
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ rbac_yaml=cw-rbac.yaml
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/deploy/cw-rbac.yaml --ignore-not-found
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ check_crd_for_deletion PR-1582-3e1c1c98
++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1582-3e1c1c98/deploy/crd.yaml
++ yq eval .metadata.name
++ /usr/bin/sed s/---//g
++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g'
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 0
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 4
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 8
++ return 1
+ [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]]
+ create_namespace psmdb-operator
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get crd
++ grep chaos-mesh.org
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep chaos-mesh
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
cleaned up old namespaces psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl delete namespace psmdb-operator --ignore-not-found
+ kubectl get ns
+ egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME'
+ awk '{print$1}'
+ xargs kubectl delete ns
namespace "cert-manager" deleted
namespace "monitoring-2-0-14804" deleted
namespace "psmdb-operator" deleted
+ kubectl wait --for=delete namespace psmdb-operator
-----------------------------------------------------------------------------------
create namespace psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl create namespace psmdb-operator
namespace/psmdb-operator created
++ kubectl config current-context
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1582-3e1c1c98-5-cluster5 --namespace=psmdb-operator
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1582-3e1c1c98-5-cluster5" modified.
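Nearly every command above runs through a `kubectl_bin` wrapper whose bookkeeping (two `mktemp` files, `exit_status`, `seq 0 2`, `cat`/`rm` of the temp files) repeats throughout the trace. A minimal reconstruction consistent with what the trace shows, including the observed back-off of 0, 4, and 8 seconds between the three attempts; the output redirection into the temp files is implied by the `cat` calls rather than shown, so treat this as a sketch:

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" -eq 0 ]; then
                break
            fi
            sleep $((timeout * i))   # observed back-off: 0s, 4s, 8s
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }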
+ deploy_operator
-----------------------------------------------------------------------------------
start PSMDB operator
-----------------------------------------------------------------------------------
+ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/conf/crd.yaml ']'
+ cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/deploy/crd.yaml
+ kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/deploy/crd.yaml
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied
+ apply_rbac cw-rbac
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/deploy/cw-rbac.yaml
+ sed -e 's^namespace: .*^namespace: psmdb-operator^'
+ kubectl apply -n psmdb-operator -f -
clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created
serviceaccount/percona-server-mongodb-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created
+ yq eval '
    (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1582-3e1c1c98") |
    ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") |
    ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/deploy/cw-operator.yaml
+ kubectl apply -f -
deployment.apps/percona-server-mongodb-operator created
+ sleep 2
++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
+ wait_pod percona-server-mongodb-operator-66947885b6-27mxp
waiting for pod/percona-server-mongodb-operator-66947885b6-27mxp to be ready.OK
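The yq pipeline above rewrites the operator manifest in-stream: it pins the image to the PR build, then uses `select(.[] == "NAME")` to find the env entry whose `name` equals the given string anywhere in the document and overwrite its sibling `.value`. A hypothetical helper illustrating that pattern (the function name and arguments are illustrative, not part of the test suite):

    # select an env-var entry by its .name value anywhere in the
    # manifest and overwrite its .value, printing the result to stdout
    set_env_in_manifest() {
        local manifest=$1 name=$2 value=$3
        yq eval "((.. | select(.[] == \"$name\")) |= .value=\"$value\")" "$manifest"
    }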
+ create_namespace monitoring-2-0-3820
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
-----------------------------------------------------------------------------------
cleaned up old namespaces monitoring-2-0-3820
-----------------------------------------------------------------------------------
+ kubectl delete namespace monitoring-2-0-3820 --ignore-not-found
+ kubectl get ns
+ egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME'
+ awk '{print$1}'
+ xargs kubectl delete ns
+ kubectl wait --for=delete namespace monitoring-2-0-3820
error: resource(s) were provided, but no name was specified
-----------------------------------------------------------------------------------
create namespace monitoring-2-0-3820
-----------------------------------------------------------------------------------
+ kubectl create namespace monitoring-2-0-3820
namespace/monitoring-2-0-3820 created
++ kubectl config current-context
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1582-3e1c1c98-5-cluster5 --namespace=monitoring-2-0-3820
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1582-3e1c1c98-5-cluster5" modified.
+ deploy_cert_manager
-----------------------------------------------------------------------------------
deploy cert manager
-----------------------------------------------------------------------------------
+ kubectl create namespace cert-manager
namespace/cert-manager created
+ kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true
namespace/cert-manager labeled
+ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml --validate=false
namespace/cert-manager configured
customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged
serviceaccount/cert-manager-cainjector created
serviceaccount/cert-manager created
serviceaccount/cert-manager-webhook created
clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews configured
role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged
role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged
role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created
rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged
rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection configured
rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created
service/cert-manager created
service/cert-manager-webhook created
deployment.apps/cert-manager-cainjector created
deployment.apps/cert-manager created
deployment.apps/cert-manager-webhook created
mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured
validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured
Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
+ kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready
pod/cert-manager-5658d944df-bfbw4 condition met
pod/cert-manager-cainjector-cb99ff845-cnmnn condition met
pod/cert-manager-webhook-7fd74b8dc7-wrq52 condition met
+ sleep 120
-----------------------------------------------------------------------------------
install PMM Server
-----------------------------------------------------------------------------------
+ deploy_pmm_server
+ helm uninstall monitoring
Error: uninstall: Release not loaded: monitoring: release: not found
+ :
+ helm repo remove stable
"stable" has been removed from your repositories
+ helm repo add stable https://charts.helm.sh/stable
"stable" has been added to your repositories
+ retry 10 60 helm install monitoring --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz
NAME: monitoring
LAST DEPLOYED: Sun Jul 7 13:59:19 2024
NAMESPACE: monitoring-2-0-3820
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster:
endpoint: https://monitoring-service.monitoring-2-0-3820.svc.cluster.local:443
login: admin
password: admin
+ sleep 20
+ kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
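The trace only shows the initialization of the `retry` helper (`max=10`, `delay=60`, `shift 2`, `n=1`); the loop body is not visible because the first helm attempt succeeded. A plausible sketch under those observations (the give-up condition is an assumption):

    retry() {
        local max=$1
        local delay=$2
        shift 2
        local n=1
        until "$@"; do
            if [ "$n" -ge "$max" ]; then
                return 1       # assumption: give up after max attempts
            fi
            sleep "$delay"
            n=$((n + 1))
        done
    }

The exec check that follows it polls `/proc/*/exe` inside the monitoring-0 pod for a running postgres process, i.e. it treats "postgres is up" as the readiness signal for PMM Server.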
+ cluster=monitoring
-----------------------------------------------------------------------------------
create secrets and start client
-----------------------------------------------------------------------------------
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/conf/secrets.yml
secret/some-users created
secret/some-users unchanged
+ yq '.spec.template.spec.volumes[0].secret.secretName="monitoring-ssl"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/conf/client_with_tls.yml
+ kubectl apply -f -
deployment.apps/psmdb-client created
+ sleep 90
-----------------------------------------------------------------------------------
create first PSMDB cluster monitoring
-----------------------------------------------------------------------------------
+ apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml
+ yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"'
+ yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"'
+ yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"'
+ yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1582-3e1c1c98"'
+ yq eval '.spec.upgradeOptions.apply="Never"'
+ kubectl apply -f -
perconaservermongodb.psmdb.percona.com/monitoring created
+ wait_for_running monitoring-rs0 3
+ wait_pod monitoring-rs0-0
waiting for pod/monitoring-rs0-0 to be ready...........OK
+ wait_pod monitoring-rs0-1
waiting for pod/monitoring-rs0-1 to be ready...........OK
++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
+ wait_pod monitoring-rs0-2
waiting for pod/monitoring-rs0-2 to be ready...........OK
++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}'
+ sleep 10
Waiting for cluster readyness.................
-----------------------------------------------------------------------------------
check if pmm-client container is not enabled
-----------------------------------------------------------------------------------
+ compare_kubectl statefulset/monitoring-rs0 -no-pmm
+ local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml
+ local new_result=/tmp/tmp.B2lWcAgif4/statefulset_monitoring-rs0.yml
+ kubectl get -o yaml statefulset/monitoring-rs0
+ yq eval '
    del(.metadata.ownerReferences[].apiVersion) |
    del(.metadata.managedFields) |
    del(.. | select(has("creationTimestamp")).creationTimestamp) |
    del(.. | select(has("namespace")).namespace) |
    del(.. | select(has("uid")).uid) |
    del(.metadata.resourceVersion) |
    del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) |
    del(.metadata.selfLink) |
    del(.metadata.annotations."cloud.google.com/neg") |
    del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") |
    del(.. | select(has("image")).image) |
    del(.. | select(has("clusterIP")).clusterIP) |
    del(.. | select(has("clusterIPs")).clusterIPs) |
    del(.. | select(has("dataSource")).dataSource) |
    del(.. | select(has("procMount")).procMount) |
    del(.. | select(has("storageClassName")).storageClassName) |
    del(.. | select(has("finalizers")).finalizers) |
    del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") |
    del(.. | select(has("volumeName")).volumeName) |
    del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") |
    del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") |
    del(.spec.volumeMode) |
    del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") |
    del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") |
    del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") |
    del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") |
    del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") |
    del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) |
    del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) |
    del(.. | select(has("nodePort")).nodePort) |
    del(.status) |
    (.. | select(tag == "!!str")) |= sub("monitoring-2-0-3820", "NAME_SPACE") |
    del(.spec.volumeClaimTemplates[].apiVersion) |
    del(.spec.volumeClaimTemplates[].kind) |
    del(.spec.ipFamilies) |
    del(.spec.ipFamilyPolicy) |
    (.. | select(. == "extensions/v1beta1")) = "apps/v1" |
    (.. | select(. == "batch/v1beta1")) = "batch/v1"' -
+ yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.B2lWcAgif4/statefulset_monitoring-rs0.yml
+ version_gt 1.22
++ echo '1.26 >= 1.22'
++ bc -l
+ yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.B2lWcAgif4/statefulset_monitoring-rs0.yml
+ yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.B2lWcAgif4/statefulset_monitoring-rs0.yml
+ diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml /tmp/tmp.B2lWcAgif4/statefulset_monitoring-rs0.yml
+ sleep 10
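The comparison step works by dumping the live object, normalizing away everything cluster- or run-specific with the long yq filter above, and diffing against a checked-in expectation. A rough sketch of the flow; `test_dir`, `tmp_dir`, and `normalization_filter` are hypothetical names standing in for the paths and filter visible in the trace:

    compare_kubectl() {
        local resource=$1
        local postfix=$2
        local expected=${test_dir}/compare/$(echo "$resource" | tr '/' '_')${postfix}.yml
        local actual=${tmp_dir}/$(echo "$resource" | tr '/' '_').yml
        # dump the live object and strip cluster- and run-specific fields
        kubectl_bin get -o yaml "$resource" | yq eval "$normalization_filter" - >"$actual"
        diff -u "$expected" "$actual"   # a non-empty diff fails the test
    }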
+ run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-3820 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+ kubectl exec psmdb-client-6cd48df8b6-8r8jz -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-3820.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://monitoring-mongos.monitoring-2-0-3820.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb
{"t":{"$date":"2024-07-07T14:03:31.617Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"}
Implicit session: session { "id" : UUID("a6580e64-7d74-4ed6-856f-ba9de66ec432") }
Percona Server for MongoDB server version: v7.0.11-6
WARNING: shell and server versions do not match
Successfully added user: {
    "user" : "myApp",
    "roles" : [ { "db" : "myApp", "role" : "readWrite" } ]
}
bye
+ run_mongos 'sh.enableSharding("myApp")' clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-3820 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ kubectl exec psmdb-client-6cd48df8b6-8r8jz -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-3820.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://monitoring-mongos.monitoring-2-0-3820.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb
{"t":{"$date":"2024-07-07T14:03:34.160Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"}
Implicit session: session { "id" : UUID("8dccd720-4db6-4dc1-bb69-5e38af699150") }
Percona Server for MongoDB server version: v7.0.11-6
WARNING: shell and server versions do not match
{
    "ok" : 1,
    "$clusterTime" : {
        "clusterTime" : Timestamp(1720361014, 8),
        "signature" : {
            "hash" : BinData(0,"dHVxqd2ASxv/lAjBLk60mzP5h0Q="),
            "keyId" : NumberLong("7388894073400066056")
        }
    },
    "operationTime" : Timestamp(1720361014, 2)
}
bye
+ insert_data_mongos 100500 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ kubectl exec psmdb-client-6cd48df8b6-8r8jz -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-3820.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://monitoring-mongos.monitoring-2-0-3820.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb
{"t":{"$date":"2024-07-07T14:03:37.624Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"}
Implicit session: session { "id" : UUID("57e539d8-1f44-432e-8033-291cbfbf12d6") }
Percona Server for MongoDB server version: v7.0.11-6
WARNING: shell and server versions do not match
switched to db myApp
WriteResult({ "nInserted" : 1 })
bye
+ insert_data_mongos 100600 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ kubectl exec psmdb-client-6cd48df8b6-8r8jz -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-3820.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://monitoring-mongos.monitoring-2-0-3820.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb
{"t":{"$date":"2024-07-07T14:03:40.818Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"}
Implicit session: session { "id" : UUID("86bb2ec3-3945-451e-a064-4c1e50b5c268") }
Percona Server for MongoDB server version: v7.0.11-6
WARNING: shell and server versions do not match
switched to db myApp
WriteResult({ "nInserted" : 1 })
bye
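Each of these calls goes through the same `run_mongos` helper: it looks up the psmdb-client pod and pipes the statement into the mongo shell inside it. A sketch reconstructed from the trace; the default values for `driver` and `suffix` are inferred from the `local driver=mongodb` and `local suffix=.svc.cluster.local` lines that appear when empty arguments are passed:

    run_mongos() {
        local command=$1
        local uri=$2
        local driver=${3:-mongodb}
        local suffix=${4:-.svc.cluster.local}
        local mongo_flag=$5
        local client_container
        client_container=$(kubectl_bin get pods --selector=name=psmdb-client \
            -o 'jsonpath={.items[].metadata.name}')
        # pipe the statement into the mongo shell running inside the client pod
        kubectl_bin exec "$client_container" -- bash -c \
            "printf '$command\n' | mongo $driver://$uri$suffix/admin $mongo_flag"
    }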
+ insert_data_mongos 100700 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ kubectl exec psmdb-client-6cd48df8b6-8r8jz -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-3820.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://monitoring-mongos.monitoring-2-0-3820.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb
{"t":{"$date":"2024-07-07T14:03:44.236Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"}
Implicit session: session { "id" : UUID("b1517572-a2f2-44a7-813a-ab2a33902dbf") }
Percona Server for MongoDB server version: v7.0.11-6
WARNING: shell and server versions do not match
switched to db myApp
WriteResult({ "nInserted" : 1 })
bye
-----------------------------------------------------------------------------------
add PMM_SERVER_API_KEY for secret some-users
-----------------------------------------------------------------------------------
+++ get_service_endpoint monitoring-service
++++ kubectl get service/monitoring-service -o json
++++ jq '.status.loadBalancer.ingress[].hostname'
+++ local hostname=null
++++ kubectl get service/monitoring-service -o json
++++ jq '.status.loadBalancer.ingress[].ip'
+++ local ip=35.193.166.177
++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@35.193.166.177/graph/api/auth/keys
++ jq .key
+ API_KEY='"eyJrIjoiQVFVN2ZJTWFIMjNReGE2UEVNMmxWNzhQeW81b3NQcGMiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="'
+ kubectl patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoiQVFVN2ZJTWFIMjNReGE2UEVNMmxWNzhQeW81b3NQcGMiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}'
secret/some-users patched
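This step provisions a Grafana API key on the PMM server and injects it into the operator's user secret. The equivalent standalone commands, as shown by the trace (note that `jq .key` keeps the surrounding quotes, so the key can be spliced into the JSON patch directly; `get_service_endpoint` falls back from the LoadBalancer hostname, which is null here, to its IP):

    endpoint=$(get_service_endpoint monitoring-service)   # resolves to 35.193.166.177 in this run
    API_KEY=$(curl --insecure -X POST \
        -H 'Content-Type: application/json' \
        -d '{"name":"operator", "role": "Admin"}' \
        "https://admin:admin@${endpoint}/graph/api/auth/keys" | jq .key)
    kubectl_bin patch secret some-users --type merge \
        --patch "{\"stringData\": {\"PMM_SERVER_API_KEY\": ${API_KEY}}}"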
--:--:-- 334 + API_KEY='"eyJrIjoiQVFVN2ZJTWFIMjNReGE2UEVNMmxWNzhQeW81b3NQcGMiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="' + kubectl_bin patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoiQVFVN2ZJTWFIMjNReGE2UEVNMmxWNzhQeW81b3NQcGMiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' ++ mktemp + local LAST_OUT=/tmp/tmp.ljocedC1zG ++ mktemp + local LAST_ERR=/tmp/tmp.T3xnfK20qC + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoiQVFVN2ZJTWFIMjNReGE2UEVNMmxWNzhQeW81b3NQcGMiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ljocedC1zG secret/some-users patched + cat /tmp/tmp.T3xnfK20qC + rm /tmp/tmp.ljocedC1zG /tmp/tmp.T3xnfK20qC + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Yl4GyB5Azx +++ mktemp ++ local LAST_ERR=/tmp/tmp.kRYxqDlUEN ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Yl4GyB5Azx ++ cat /tmp/tmp.kRYxqDlUEN ++ rm /tmp/tmp.Yl4GyB5Azx /tmp/tmp.kRYxqDlUEN ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready.OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jlue5DHXuN +++ mktemp ++ local LAST_ERR=/tmp/tmp.0r3EgrQE7m ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.jlue5DHXuN ++ cat /tmp/tmp.0r3EgrQE7m ++ rm /tmp/tmp.jlue5DHXuN /tmp/tmp.0r3EgrQE7m ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness............................................................................................. 
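The API-key step traced above reduces to two calls: create an Admin-role API key through the Grafana API that PMM Server exposes under /graph, then write it into the operator's users secret so the pmm-client sidecars can authenticate. A minimal sketch, assuming the same PMM endpoint, admin:admin credentials, and some-users secret seen in this run (ENDPOINT stands in for the LoadBalancer address, 35.193.166.177 here):

# create an Admin-role API key via the Grafana API behind PMM Server
API_KEY=$(curl -sk -X POST -H 'Content-Type: application/json' \
    -d '{"name":"operator","role":"Admin"}' \
    "https://admin:admin@${ENDPOINT}/graph/api/auth/keys" | jq -r .key)

# store it in the some-users secret; the operator hands it to pmm-client as PMM_SERVER_API_KEY
kubectl patch secret some-users --type merge \
    -p "{\"stringData\": {\"PMM_SERVER_API_KEY\": \"${API_KEY}\"}}"

This mirrors what the harness executed before waiting for the monitoring-rs0 pods to become ready.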
+ sleep 90 + desc 'check if pmm-client container enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-rs0 + local resource=statefulset/monitoring-rs0 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml + local new_result=/tmp/tmp.B2lWcAgif4/statefulset_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-3820", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.vsnnmfASx5 ++ mktemp + local LAST_ERR=/tmp/tmp.desOTRV9Jn + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vsnnmfASx5 + cat /tmp/tmp.desOTRV9Jn + rm /tmp/tmp.vsnnmfASx5 /tmp/tmp.desOTRV9Jn + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.B2lWcAgif4/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.B2lWcAgif4/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.B2lWcAgif4/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml /tmp/tmp.B2lWcAgif4/statefulset_monitoring-rs0.yml + compare_kubectl service/monitoring-rs0 + local resource=service/monitoring-rs0 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml + local new_result=/tmp/tmp.B2lWcAgif4/service_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-rs0 ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. 
| select(tag == "!!str")) |= sub("monitoring-2-0-3820", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.mQrsTVj4ke ++ mktemp + local LAST_ERR=/tmp/tmp.WldwexopxO + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mQrsTVj4ke + cat /tmp/tmp.WldwexopxO + rm /tmp/tmp.mQrsTVj4ke /tmp/tmp.WldwexopxO + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.B2lWcAgif4/service_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.B2lWcAgif4/service_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.B2lWcAgif4/service_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml /tmp/tmp.B2lWcAgif4/service_monitoring-rs0.yml + compare_kubectl service/monitoring-mongos + local resource=service/monitoring-mongos + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml + local new_result=/tmp/tmp.B2lWcAgif4/service_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-mongos ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-3820", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.R993uevIkG ++ mktemp + local LAST_ERR=/tmp/tmp.9WWcgRFgoS + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.R993uevIkG + cat /tmp/tmp.9WWcgRFgoS + rm /tmp/tmp.R993uevIkG /tmp/tmp.9WWcgRFgoS + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.B2lWcAgif4/service_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.B2lWcAgif4/service_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.B2lWcAgif4/service_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml /tmp/tmp.B2lWcAgif4/service_monitoring-mongos.yml + compare_kubectl statefulset/monitoring-cfg + local resource=statefulset/monitoring-cfg + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml + local new_result=/tmp/tmp.B2lWcAgif4/statefulset_monitoring-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. 
| select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-3820", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.wLuZYQgpsv ++ mktemp + local LAST_ERR=/tmp/tmp.BHLAPugFcM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.wLuZYQgpsv + cat /tmp/tmp.BHLAPugFcM + rm /tmp/tmp.wLuZYQgpsv /tmp/tmp.BHLAPugFcM + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.B2lWcAgif4/statefulset_monitoring-cfg.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.B2lWcAgif4/statefulset_monitoring-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.B2lWcAgif4/statefulset_monitoring-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml /tmp/tmp.B2lWcAgif4/statefulset_monitoring-cfg.yml + compare_kubectl statefulset/monitoring-mongos + local resource=statefulset/monitoring-mongos + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml + local new_result=/tmp/tmp.B2lWcAgif4/statefulset_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos-oc.yml ']' + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. 
| select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-3820", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + kubectl_bin get -o yaml statefulset/monitoring-mongos ++ mktemp + local LAST_OUT=/tmp/tmp.kAxbi6emMZ ++ mktemp + local LAST_ERR=/tmp/tmp.PNrNJsqERv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kAxbi6emMZ + cat /tmp/tmp.PNrNJsqERv + rm /tmp/tmp.kAxbi6emMZ /tmp/tmp.PNrNJsqERv + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.B2lWcAgif4/statefulset_monitoring-mongos.yml + version_gt 1.22 ++ bc -l ++ echo '1.26 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.B2lWcAgif4/statefulset_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.B2lWcAgif4/statefulset_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml /tmp/tmp.B2lWcAgif4/statefulset_monitoring-mongos.yml + desc 'check mongod metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongod metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-3820-monitoring-rs0-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-3820-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1720361247 ++ /usr/bin/date -u +%s + local end=1720361307 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.KMKz5VFqq8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vNDo1Uo9Ms +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.KMKz5VFqq8 +++ cat /tmp/tmp.vNDo1Uo9Ms +++ rm 
/tmp/tmp.KMKz5VFqq8 /tmp/tmp.vNDo1Uo9Ms +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ jq '.status.loadBalancer.ingress[].ip' +++ local LAST_OUT=/tmp/tmp.D2qeSXRvOk ++++ mktemp +++ local LAST_ERR=/tmp/tmp.aTBVQ8juFa +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.D2qeSXRvOk +++ cat /tmp/tmp.aTBVQ8juFa +++ rm /tmp/tmp.D2qeSXRvOk /tmp/tmp.aTBVQ8juFa +++ return 0 ++ local ip=35.193.166.177 ++ '[' -n 35.193.166.177 -a 35.193.166.177 '!=' null ']' ++ echo 35.193.166.177 ++ return + local endpoint=35.193.166.177 + jq '.data.result[0].values[][1]' + curl -s -k 'https://admin:admin@35.193.166.177/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-3820-monitoring-rs0-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-3820-monitoring-rs0-1%22%7D%29&start=1720361247&end=1720361307&step=60' + grep '^"[0-9]' "1720357766" "1720357766" + get_metric_values mongodb_connections monitoring-2-0-3820-monitoring-rs0-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-3820-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1720361249 ++ /usr/bin/date -u +%s + local end=1720361309 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ jq '.status.loadBalancer.ingress[].hostname' +++ local LAST_OUT=/tmp/tmp.dhEtN3cuO7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qtukurdH2Y +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.dhEtN3cuO7 +++ cat /tmp/tmp.qtukurdH2Y +++ rm /tmp/tmp.dhEtN3cuO7 /tmp/tmp.qtukurdH2Y +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.vqVgOYK5xb ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QW9d7ynB5d +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.vqVgOYK5xb +++ cat /tmp/tmp.QW9d7ynB5d +++ rm /tmp/tmp.vqVgOYK5xb /tmp/tmp.QW9d7ynB5d +++ return 0 ++ local ip=35.193.166.177 ++ '[' -n 35.193.166.177 -a 35.193.166.177 '!=' null ']' ++ echo 35.193.166.177 ++ return + local endpoint=35.193.166.177 + curl -s -k 'https://admin:admin@35.193.166.177/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-3820-monitoring-rs0-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-3820-monitoring-rs0-1%22%7D%29&start=1720361249&end=1720361309&step=60' + grep '^"[0-9]' + jq '.data.result[0].values[][1]' "0" "0" + desc 'check mongo config metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongo config metrics 
----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-3820-monitoring-cfg-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-3820-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1720361251 ++ /usr/bin/date -u +%s + local end=1720361311 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Obbe7jFfF1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PiRik5xZpa +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Obbe7jFfF1 +++ cat /tmp/tmp.PiRik5xZpa +++ rm /tmp/tmp.Obbe7jFfF1 /tmp/tmp.PiRik5xZpa +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pWMDAq9Fj9 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nt6wJI6wH0 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.pWMDAq9Fj9 +++ cat /tmp/tmp.nt6wJI6wH0 +++ rm /tmp/tmp.pWMDAq9Fj9 /tmp/tmp.nt6wJI6wH0 +++ return 0 ++ local ip=35.193.166.177 ++ '[' -n 35.193.166.177 -a 35.193.166.177 '!=' null ']' ++ echo 35.193.166.177 ++ return + local endpoint=35.193.166.177 + jq '.data.result[0].values[][1]' + grep '^"[0-9]' + curl -s -k 'https://admin:admin@35.193.166.177/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-3820-monitoring-cfg-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-3820-monitoring-cfg-1%22%7D%29&start=1720361251&end=1720361311&step=60' "1720357766" "1720357766" + get_metric_values mongodb_connections monitoring-2-0-3820-monitoring-cfg-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-3820-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1720361253 ++ /usr/bin/date -u +%s + local end=1720361313 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jgKB8FLWLM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yljjX6vT7A +++ local exit_status=0 +++ local timeout=4 +++ sed -e 's/^"//; s/"$//;' ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.jgKB8FLWLM +++ cat /tmp/tmp.yljjX6vT7A +++ rm /tmp/tmp.jgKB8FLWLM /tmp/tmp.yljjX6vT7A +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.of8FxNXRVV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.licZUyI7lq +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in 
'$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.of8FxNXRVV +++ cat /tmp/tmp.licZUyI7lq +++ rm /tmp/tmp.of8FxNXRVV /tmp/tmp.licZUyI7lq +++ return 0 ++ local ip=35.193.166.177 ++ '[' -n 35.193.166.177 -a 35.193.166.177 '!=' null ']' ++ echo 35.193.166.177 ++ return + local endpoint=35.193.166.177 + curl -s -k 'https://admin:admin@35.193.166.177/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-3820-monitoring-cfg-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-3820-monitoring-cfg-1%22%7D%29&start=1720361253&end=1720361313&step=60' + grep '^"[0-9]' + jq '.data.result[0].values[][1]' "0" "0" + desc 'check mongos metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongos metrics ----------------------------------------------------------------------------------- ++ kubectl get pod -l app.kubernetes.io/component=mongos -o 'jsonpath={.items[0].metadata.name}' + MONGOS_POD_NAME=monitoring-mongos-0 + get_metric_values node_boot_time_seconds monitoring-2-0-3820-monitoring-mongos-0 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-3820-monitoring-mongos-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1720361256 ++ /usr/bin/date -u +%s + local end=1720361316 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.KWi3thvRqY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uzcg7KeBjI +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.KWi3thvRqY +++ cat /tmp/tmp.uzcg7KeBjI +++ rm /tmp/tmp.KWi3thvRqY /tmp/tmp.uzcg7KeBjI +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.n5s9H0KG0V ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KZwOaqV4XM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.n5s9H0KG0V +++ cat /tmp/tmp.KZwOaqV4XM +++ rm /tmp/tmp.n5s9H0KG0V /tmp/tmp.KZwOaqV4XM +++ return 0 ++ local ip=35.193.166.177 ++ '[' -n 35.193.166.177 -a 35.193.166.177 '!=' null ']' ++ echo 35.193.166.177 ++ return + local endpoint=35.193.166.177 + jq '.data.result[0].values[][1]' + curl -s -k 'https://admin:admin@35.193.166.177/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-3820-monitoring-mongos-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-3820-monitoring-mongos-0%22%7D%29&start=1720361256&end=1720361316&step=60' + grep '^"[0-9]' "1720357766" "1720357766" + sleep 90 + desc 'check QAN data' + set +o xtrace ----------------------------------------------------------------------------------- check QAN data 
----------------------------------------------------------------------------------- + get_qan_values mongodb dev-mongod admin:admin + local service_type=mongodb + local environment=dev-mongod + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2024-07-07T02:10:08+00:00 ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2024-07-07T14:10:08+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UuCRTth6th ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NPz433h0pQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.UuCRTth6th +++ cat /tmp/tmp.NPz433h0pQ +++ rm /tmp/tmp.UuCRTth6th /tmp/tmp.NPz433h0pQ +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xHf1gxFEh7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.teeuLGKqpj +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.xHf1gxFEh7 +++ cat /tmp/tmp.teeuLGKqpj +++ rm /tmp/tmp.xHf1gxFEh7 /tmp/tmp.teeuLGKqpj +++ return 0 ++ local ip=35.193.166.177 ++ '[' -n 35.193.166.177 -a 35.193.166.177 '!=' null ']' ++ echo 35.193.166.177 ++ return + endpoint=35.193.166.177 + cat + local response ++ curl -s -k -XPOST -d @payload.json https://admin:admin@35.193.166.177/v0/qan/GetReport ++ jq '.rows[].fingerprint' + response='"TOTAL" "FIND system.version _id" "FIND oplog.rs"' + rm -f payload.json + [[ "TOTAL" "FIND system.version _id" "FIND oplog.rs" == \n\u\l\l ]] + get_qan_values mongodb dev-mongos admin:admin + local service_type=mongodb + local environment=dev-mongos + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2024-07-07T02:10:11+00:00 ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2024-07-07T14:10:11+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.usXttL8jBy ++++ mktemp +++ local LAST_ERR=/tmp/tmp.lV3Iw4bhBf +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.usXttL8jBy +++ cat /tmp/tmp.lV3Iw4bhBf +++ rm /tmp/tmp.usXttL8jBy /tmp/tmp.lV3Iw4bhBf +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.b6iLpd7Bz3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.iByGmNFFvJ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ 
exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.b6iLpd7Bz3 +++ cat /tmp/tmp.iByGmNFFvJ +++ rm /tmp/tmp.b6iLpd7Bz3 /tmp/tmp.iByGmNFFvJ +++ return 0 ++ local ip=35.193.166.177 ++ '[' -n 35.193.166.177 -a 35.193.166.177 '!=' null ']' ++ echo 35.193.166.177 ++ return + endpoint=35.193.166.177 + cat + local response ++ curl -s -k -XPOST -d @payload.json https://admin:admin@35.193.166.177/v0/qan/GetReport ++ jq '.rows[].fingerprint' + response='"TOTAL" "DBSTATS application,architecture,client,clusterTime,db,dbStats,driver,hash,host,id,keyId,level,lsid,mayBypassWriteBlocking,mongos,name,os,platform,provenance,readConcern,role,scale,signature,type,uid,user,version"' + rm -f payload.json + [[ "TOTAL" "DBSTATS application,architecture,client,clusterTime,db,dbStats,driver,hash,host,id,keyId,level,lsid,mayBypassWriteBlocking,mongos,name,os,platform,provenance,readConcern,role,scale,signature,type,uid,user,version" == \n\u\l\l ]] + nodeList=($(get_node_id_from_pmm)) ++ get_node_id_from_pmm ++ nodeList=() ++ local -a nodeList +++ kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name ++++ mktemp +++ local LAST_OUT=/tmp/tmp.KZqjKMrdPO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.LHDPFA6zSw +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.KZqjKMrdPO +++ cat /tmp/tmp.LHDPFA6zSw +++ rm /tmp/tmp.KZqjKMrdPO /tmp/tmp.LHDPFA6zSw +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2RoeOCTTBP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2V3qa4ii7B +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.2RoeOCTTBP +++ cat /tmp/tmp.2V3qa4ii7B +++ rm /tmp/tmp.2RoeOCTTBP /tmp/tmp.2V3qa4ii7B +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-cfg-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.HgbT09B1nK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mcO5zA2B4H +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-cfg-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.HgbT09B1nK +++ cat /tmp/tmp.mcO5zA2B4H +++ rm /tmp/tmp.HgbT09B1nK /tmp/tmp.mcO5zA2B4H +++ return 0 ++ for instance in 
'$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-cfg-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Igh9abvBkz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1rroT7nCfE +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-cfg-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Igh9abvBkz +++ cat /tmp/tmp.1rroT7nCfE +++ rm /tmp/tmp.Igh9abvBkz /tmp/tmp.1rroT7nCfE +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-mongos-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2XQY8PS4ze ++++ mktemp +++ local LAST_ERR=/tmp/tmp.zQ681bxbpi +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-mongos-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.2XQY8PS4ze +++ cat /tmp/tmp.zQ681bxbpi +++ rm /tmp/tmp.2XQY8PS4ze /tmp/tmp.zQ681bxbpi +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-mongos-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.cXyAWAvu9J ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NR68WDmcSd +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-mongos-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.cXyAWAvu9J +++ cat /tmp/tmp.NR68WDmcSd +++ rm /tmp/tmp.cXyAWAvu9J /tmp/tmp.NR68WDmcSd +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-mongos-2 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.bNwBQjDNgV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.zJyogjhbQH +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-mongos-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat 
/tmp/tmp.bNwBQjDNgV +++ cat /tmp/tmp.zJyogjhbQH +++ rm /tmp/tmp.bNwBQjDNgV /tmp/tmp.zJyogjhbQH +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-rs0-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.gmTCaZgP98 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xhRYn6Qd2Q +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-rs0-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.gmTCaZgP98 +++ cat /tmp/tmp.xhRYn6Qd2Q +++ rm /tmp/tmp.gmTCaZgP98 /tmp/tmp.xhRYn6Qd2Q +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-rs0-1 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ jq -r .pmm_agent_status.node_id +++ local LAST_OUT=/tmp/tmp.etkGqUfXzV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UPtZqt2PJd +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-rs0-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.etkGqUfXzV +++ cat /tmp/tmp.UPtZqt2PJd +++ rm /tmp/tmp.etkGqUfXzV /tmp/tmp.UPtZqt2PJd +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ jq -r .pmm_agent_status.node_id +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-rs0-2 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5wyiWIYddN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5kRrYg4My5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-rs0-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.5wyiWIYddN +++ cat /tmp/tmp.5kRrYg4My5 +++ rm /tmp/tmp.5wyiWIYddN /tmp/tmp.5kRrYg4My5 +++ return 0 ++ echo /node_id/2c27bde6-778d-4eaa-8dcb-2dc0b1a55007 /node_id/217117f6-ef03-45dc-877f-91db090cfaca /node_id/d40fe064-57fc-4fb9-8b7b-779e36fb1316 /node_id/835799cc-c9fc-4951-bd76-1edb86881412 /node_id/0cf881b7-0ef8-44f5-899d-11d433365fa6 /node_id/0453758d-caad-4687-a25d-2ffd3a27bdb3 /node_id/c1b33882-129b-4246-8d0e-9078fbb9660b /node_id/c80bc7c9-0e84-425a-8d2e-074127673580 /node_id/b0621f8e-75ea-4a81-a9cf-f4f0ee247cc9 + nodeList_from_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/2c27bde6-778d-4eaa-8dcb-2dc0b1a55007 /node_id/217117f6-ef03-45dc-877f-91db090cfaca /node_id/d40fe064-57fc-4fb9-8b7b-779e36fb1316 /node_id/835799cc-c9fc-4951-bd76-1edb86881412 
/node_id/0cf881b7-0ef8-44f5-899d-11d433365fa6 /node_id/0453758d-caad-4687-a25d-2ffd3a27bdb3 /node_id/c1b33882-129b-4246-8d0e-9078fbb9660b /node_id/c80bc7c9-0e84-425a-8d2e-074127673580 /node_id/b0621f8e-75ea-4a81-a9cf-f4f0ee247cc9 ++ nodeList=("$@") ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/2c27bde6-778d-4eaa-8dcb-2dc0b1a55007 ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.dVGx8uQcu2 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.qufXhAUJFa +++++ local exit_status=0 +++++ local timeout=4 +++ awk '{print $4}' ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.dVGx8uQcu2 +++++ cat /tmp/tmp.qufXhAUJFa Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.dVGx8uQcu2 +++++ cat /tmp/tmp.qufXhAUJFa Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.dVGx8uQcu2 +++++ cat /tmp/tmp.qufXhAUJFa Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.dVGx8uQcu2 +++++ cat /tmp/tmp.qufXhAUJFa Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.dVGx8uQcu2 /tmp/tmp.qufXhAUJFa +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.eYA8FBT8Si ++++ mktemp +++ local LAST_ERR=/tmp/tmp.eLzqyronww +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.eYA8FBT8Si +++ cat /tmp/tmp.eLzqyronww command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ 
exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.eYA8FBT8Si +++ cat /tmp/tmp.eLzqyronww command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.eYA8FBT8Si +++ cat /tmp/tmp.eLzqyronww command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.eYA8FBT8Si +++ cat /tmp/tmp.eLzqyronww command terminated with exit code 1 +++ rm /tmp/tmp.eYA8FBT8Si /tmp/tmp.eLzqyronww +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' ++++ get_service_ip monitoring-service +++ grep /node_id/217117f6-ef03-45dc-877f-91db090cfaca ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.0vmmBe3nkq ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.Xb6PN5USCo +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.0vmmBe3nkq +++++ cat /tmp/tmp.Xb6PN5USCo Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.0vmmBe3nkq +++++ cat /tmp/tmp.Xb6PN5USCo Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.0vmmBe3nkq +++++ cat /tmp/tmp.Xb6PN5USCo Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.0vmmBe3nkq +++++ cat /tmp/tmp.Xb6PN5USCo Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.0vmmBe3nkq /tmp/tmp.Xb6PN5USCo +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UeiGZnoXuS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3UbRpmR4w1 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e 
+++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.UeiGZnoXuS +++ cat /tmp/tmp.3UbRpmR4w1 command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.UeiGZnoXuS +++ cat /tmp/tmp.3UbRpmR4w1 command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.UeiGZnoXuS +++ cat /tmp/tmp.3UbRpmR4w1 command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.UeiGZnoXuS +++ cat /tmp/tmp.3UbRpmR4w1 command terminated with exit code 1 +++ rm /tmp/tmp.UeiGZnoXuS /tmp/tmp.3UbRpmR4w1 +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/d40fe064-57fc-4fb9-8b7b-779e36fb1316 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.l6F1MZDBeH ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.czNKWWxRn3 +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.l6F1MZDBeH +++++ cat /tmp/tmp.czNKWWxRn3 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.l6F1MZDBeH +++++ cat /tmp/tmp.czNKWWxRn3 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.l6F1MZDBeH +++++ cat /tmp/tmp.czNKWWxRn3 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.l6F1MZDBeH +++++ cat /tmp/tmp.czNKWWxRn3 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.l6F1MZDBeH /tmp/tmp.czNKWWxRn3 +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.b6R19udIuh 
++++ mktemp +++ local LAST_ERR=/tmp/tmp.hU9DjCGWTN +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.b6R19udIuh +++ cat /tmp/tmp.hU9DjCGWTN command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.b6R19udIuh +++ cat /tmp/tmp.hU9DjCGWTN command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.b6R19udIuh +++ cat /tmp/tmp.hU9DjCGWTN command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.b6R19udIuh +++ cat /tmp/tmp.hU9DjCGWTN command terminated with exit code 1 +++ rm /tmp/tmp.b6R19udIuh /tmp/tmp.hU9DjCGWTN +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ grep /node_id/835799cc-c9fc-4951-bd76-1edb86881412 ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.6TdaRBAoj7 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.P051nE1YWy +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.6TdaRBAoj7 +++++ cat /tmp/tmp.P051nE1YWy Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.6TdaRBAoj7 +++++ cat /tmp/tmp.P051nE1YWy Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.6TdaRBAoj7 +++++ cat /tmp/tmp.P051nE1YWy Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.6TdaRBAoj7 +++++ cat /tmp/tmp.P051nE1YWy Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.6TdaRBAoj7 
/tmp/tmp.P051nE1YWy +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.LBP7qwfYd7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.S75Mh3RQBp +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.LBP7qwfYd7 +++ cat /tmp/tmp.S75Mh3RQBp command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.LBP7qwfYd7 +++ cat /tmp/tmp.S75Mh3RQBp command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.LBP7qwfYd7 +++ cat /tmp/tmp.S75Mh3RQBp command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.LBP7qwfYd7 +++ cat /tmp/tmp.S75Mh3RQBp command terminated with exit code 1 +++ rm /tmp/tmp.LBP7qwfYd7 /tmp/tmp.S75Mh3RQBp +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/0cf881b7-0ef8-44f5-899d-11d433365fa6 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.TZXV5Jv13o ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.veD1gIk4k2 +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.TZXV5Jv13o +++++ cat /tmp/tmp.veD1gIk4k2 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.TZXV5Jv13o +++++ cat /tmp/tmp.veD1gIk4k2 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 
1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.TZXV5Jv13o +++++ cat /tmp/tmp.veD1gIk4k2 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.TZXV5Jv13o +++++ cat /tmp/tmp.veD1gIk4k2 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.TZXV5Jv13o /tmp/tmp.veD1gIk4k2 +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.mEmtXThHb7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VqK5Rnerzb +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.mEmtXThHb7 +++ cat /tmp/tmp.VqK5Rnerzb command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.mEmtXThHb7 +++ cat /tmp/tmp.VqK5Rnerzb command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.mEmtXThHb7 +++ cat /tmp/tmp.VqK5Rnerzb command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.mEmtXThHb7 +++ cat /tmp/tmp.VqK5Rnerzb command terminated with exit code 1 +++ rm /tmp/tmp.mEmtXThHb7 /tmp/tmp.VqK5Rnerzb +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/0453758d-caad-4687-a25d-2ffd3a27bdb3 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.UDXqGObiNy ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.naKtBU08Ti +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.UDXqGObiNy +++++ cat /tmp/tmp.naKtBU08Ti Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ 
set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.UDXqGObiNy +++++ cat /tmp/tmp.naKtBU08Ti Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.UDXqGObiNy +++++ cat /tmp/tmp.naKtBU08Ti Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.UDXqGObiNy +++++ cat /tmp/tmp.naKtBU08Ti Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.UDXqGObiNy /tmp/tmp.naKtBU08Ti +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2eAOOCsZy1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0dX5cHiqHM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.2eAOOCsZy1 +++ cat /tmp/tmp.0dX5cHiqHM command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.2eAOOCsZy1 +++ cat /tmp/tmp.0dX5cHiqHM command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.2eAOOCsZy1 +++ cat /tmp/tmp.0dX5cHiqHM command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.2eAOOCsZy1 +++ cat /tmp/tmp.0dX5cHiqHM command terminated with exit code 1 +++ rm /tmp/tmp.2eAOOCsZy1 /tmp/tmp.0dX5cHiqHM +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ grep /node_id/c1b33882-129b-4246-8d0e-9078fbb9660b ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.iyU0Fr98NU ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.oJR3hC9OBC +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ 
exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.iyU0Fr98NU +++++ cat /tmp/tmp.oJR3hC9OBC Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.iyU0Fr98NU +++++ cat /tmp/tmp.oJR3hC9OBC Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.iyU0Fr98NU +++++ cat /tmp/tmp.oJR3hC9OBC Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.iyU0Fr98NU +++++ cat /tmp/tmp.oJR3hC9OBC Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.iyU0Fr98NU /tmp/tmp.oJR3hC9OBC +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2EasATz5bd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yy9WBxJE8p +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.2EasATz5bd +++ cat /tmp/tmp.yy9WBxJE8p command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.2EasATz5bd +++ cat /tmp/tmp.yy9WBxJE8p command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.2EasATz5bd +++ cat /tmp/tmp.yy9WBxJE8p command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.2EasATz5bd +++ cat /tmp/tmp.yy9WBxJE8p command terminated with exit code 1 +++ rm /tmp/tmp.2EasATz5bd /tmp/tmp.yy9WBxJE8p +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/c80bc7c9-0e84-425a-8d2e-074127673580 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin 
get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.qgkPZmJNpL ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.d6JXq6CDUy +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.qgkPZmJNpL +++++ cat /tmp/tmp.d6JXq6CDUy Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.qgkPZmJNpL +++++ cat /tmp/tmp.d6JXq6CDUy Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.qgkPZmJNpL +++++ cat /tmp/tmp.d6JXq6CDUy Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.qgkPZmJNpL +++++ cat /tmp/tmp.d6JXq6CDUy Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.qgkPZmJNpL /tmp/tmp.d6JXq6CDUy +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TZ6yDqdtdf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0L9U3Kfkqu +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.TZ6yDqdtdf +++ cat /tmp/tmp.0L9U3Kfkqu command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.TZ6yDqdtdf +++ cat /tmp/tmp.0L9U3Kfkqu command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.TZ6yDqdtdf +++ cat /tmp/tmp.0L9U3Kfkqu command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.TZ6yDqdtdf +++ cat /tmp/tmp.0L9U3Kfkqu command terminated with exit code 1 +++ rm /tmp/tmp.TZ6yDqdtdf /tmp/tmp.0L9U3Kfkqu +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- 
pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/b0621f8e-75ea-4a81-a9cf-f4f0ee247cc9 ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++ awk '{print $4}' +++++ local LAST_OUT=/tmp/tmp.yzsJYZQfxK ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.VZHkbssV7F +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.yzsJYZQfxK +++++ cat /tmp/tmp.VZHkbssV7F Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.yzsJYZQfxK +++++ cat /tmp/tmp.VZHkbssV7F Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.yzsJYZQfxK +++++ cat /tmp/tmp.VZHkbssV7F Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.yzsJYZQfxK +++++ cat /tmp/tmp.VZHkbssV7F Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.yzsJYZQfxK /tmp/tmp.VZHkbssV7F +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.33FCT9dQEH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gdMToQJFM4 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.33FCT9dQEH +++ cat /tmp/tmp.gdMToQJFM4 command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.33FCT9dQEH +++ cat /tmp/tmp.gdMToQJFM4 command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 
'!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.33FCT9dQEH +++ cat /tmp/tmp.gdMToQJFM4 command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.33FCT9dQEH +++ cat /tmp/tmp.gdMToQJFM4 command terminated with exit code 1 +++ rm /tmp/tmp.33FCT9dQEH /tmp/tmp.gdMToQJFM4 +++ return 1 ++ echo + kubectl_bin patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.xBfBoSA0nZ ++ mktemp + local LAST_ERR=/tmp/tmp.vflP930WwR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xBfBoSA0nZ perconaservermongodb.psmdb.percona.com/monitoring patched + cat /tmp/tmp.vflP930WwR + rm /tmp/tmp.xBfBoSA0nZ /tmp/tmp.vflP930WwR + return 0 + wait_for_delete pod/monitoring-mongos-0 + local res=pod/monitoring-mongos-0 + local wait_time=60 + set +o xtrace pod/monitoring-mongos-0 - ............................Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found + wait_for_delete pod/monitoring-rs0-0 + local res=pod/monitoring-rs0-0 + local wait_time=60 + set +o xtrace pod/monitoring-rs0-0 - ........Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found + wait_for_delete pod/monitoring-cfg-0 + local res=pod/monitoring-cfg-0 + local wait_time=60 + set +o xtrace pod/monitoring-cfg-0 - .......Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found + desc 'check if services are not deleted' + set +o xtrace ----------------------------------------------------------------------------------- check if services are not deleted ----------------------------------------------------------------------------------- + kubectl_bin get svc monitoring-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.lGZojp5QXD ++ mktemp + local LAST_ERR=/tmp/tmp.bWwTe4fnSL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get svc monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lGZojp5QXD NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-rs0 ClusterIP None 27017/TCP 16m + cat /tmp/tmp.bWwTe4fnSL + rm /tmp/tmp.lGZojp5QXD /tmp/tmp.bWwTe4fnSL + return 0 + kubectl_bin get svc monitoring-cfg ++ mktemp + local LAST_OUT=/tmp/tmp.F4MzoZbIG1 ++ mktemp + local LAST_ERR=/tmp/tmp.1hjc97hZo2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get svc monitoring-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.F4MzoZbIG1 NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-cfg ClusterIP None 27017/TCP 16m + cat /tmp/tmp.1hjc97hZo2 + rm /tmp/tmp.F4MzoZbIG1 /tmp/tmp.1hjc97hZo2 + return 0 + kubectl_bin get svc monitoring-mongos ++ mktemp + local LAST_OUT=/tmp/tmp.a72GWb6eol ++ mktemp + local LAST_ERR=/tmp/tmp.Zr7PBPBklC + local exit_status=0 + local 
timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get svc monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.a72GWb6eol NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-mongos ClusterIP 10.213.148.153 27017/TCP 15m + cat /tmp/tmp.Zr7PBPBklC + rm /tmp/tmp.a72GWb6eol /tmp/tmp.Zr7PBPBklC + return 0 + does_node_id_exists_in_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/2c27bde6-778d-4eaa-8dcb-2dc0b1a55007 /node_id/217117f6-ef03-45dc-877f-91db090cfaca /node_id/d40fe064-57fc-4fb9-8b7b-779e36fb1316 /node_id/835799cc-c9fc-4951-bd76-1edb86881412 /node_id/0cf881b7-0ef8-44f5-899d-11d433365fa6 /node_id/0453758d-caad-4687-a25d-2ffd3a27bdb3 /node_id/c1b33882-129b-4246-8d0e-9078fbb9660b /node_id/c80bc7c9-0e84-425a-8d2e-074127673580 /node_id/b0621f8e-75ea-4a81-a9cf-f4f0ee247cc9 ++ nodeList=("$@") ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/2c27bde6-778d-4eaa-8dcb-2dc0b1a55007 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.Tz4nc2ydQc ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.l2wD1QXQSr +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.Tz4nc2ydQc +++++ cat /tmp/tmp.l2wD1QXQSr Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.Tz4nc2ydQc +++++ cat /tmp/tmp.l2wD1QXQSr Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.Tz4nc2ydQc +++++ cat /tmp/tmp.l2wD1QXQSr Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.Tz4nc2ydQc +++++ cat /tmp/tmp.l2wD1QXQSr Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.Tz4nc2ydQc /tmp/tmp.l2wD1QXQSr +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.j8mfCqG7T9 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.4bm7USPz5l +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e 
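The loop entered here (does_node_id_exists in the trace) walks the node IDs collected earlier and asks the PMM server whether each one is still registered as a CONTAINER_NODE, appending anything pmm-admin still reports to nodeList_from_pmm. A simplified sketch of that check, with the assumed name pmm_nodes_still_registered and the server address hard-coded to the value used in this run:

pmm_nodes_still_registered() {
    # Echo every node ID that pmm-admin's inventory still knows about.
    local namespace="$1"; shift
    local node_id found
    for node_id in "$@"; do
        found=$(kubectl exec -n "$namespace" monitoring-0 -- \
            pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ \
            --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE \
            | grep "$node_id" | awk '{print $4}') || true
        # Column 4 mirrors the field the suite extracts from the inventory listing.
        [ -n "$found" ] && echo "$found"
    done
}

In this run every pmm-admin call exits with code 1, so nothing is appended and the collected list stays empty; the `[[ -n '' ]]` check near the end of this block then finds nothing still registered and the test proceeds.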
+++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.j8mfCqG7T9 +++ cat /tmp/tmp.4bm7USPz5l command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.j8mfCqG7T9 +++ cat /tmp/tmp.4bm7USPz5l command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.j8mfCqG7T9 +++ cat /tmp/tmp.4bm7USPz5l command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.j8mfCqG7T9 +++ cat /tmp/tmp.4bm7USPz5l command terminated with exit code 1 +++ rm /tmp/tmp.j8mfCqG7T9 /tmp/tmp.4bm7USPz5l +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/217117f6-ef03-45dc-877f-91db090cfaca +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.c8up7YziZc ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.g19i5QRHZf +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.c8up7YziZc +++++ cat /tmp/tmp.g19i5QRHZf Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.c8up7YziZc +++++ cat /tmp/tmp.g19i5QRHZf Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.c8up7YziZc +++++ cat /tmp/tmp.g19i5QRHZf Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.c8up7YziZc +++++ cat /tmp/tmp.g19i5QRHZf Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.c8up7YziZc /tmp/tmp.g19i5QRHZf +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n 
monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.sOvtjAlv6k ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9sl7m8Y4mx +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.sOvtjAlv6k +++ cat /tmp/tmp.9sl7m8Y4mx command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.sOvtjAlv6k +++ cat /tmp/tmp.9sl7m8Y4mx command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.sOvtjAlv6k +++ cat /tmp/tmp.9sl7m8Y4mx command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.sOvtjAlv6k +++ cat /tmp/tmp.9sl7m8Y4mx command terminated with exit code 1 +++ rm /tmp/tmp.sOvtjAlv6k /tmp/tmp.9sl7m8Y4mx +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/d40fe064-57fc-4fb9-8b7b-779e36fb1316 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.tkFxw5rhFk ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.XbZl9VnKej +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.tkFxw5rhFk +++++ cat /tmp/tmp.XbZl9VnKej Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.tkFxw5rhFk +++++ cat /tmp/tmp.XbZl9VnKej Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.tkFxw5rhFk +++++ cat /tmp/tmp.XbZl9VnKej Error from server (NotFound): perconaservermongodbs.psmdb.percona.com 
"monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.tkFxw5rhFk +++++ cat /tmp/tmp.XbZl9VnKej Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.tkFxw5rhFk /tmp/tmp.XbZl9VnKej +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.SW6TLXLWQB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Mrm1807o7P +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.SW6TLXLWQB +++ cat /tmp/tmp.Mrm1807o7P command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.SW6TLXLWQB +++ cat /tmp/tmp.Mrm1807o7P command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.SW6TLXLWQB +++ cat /tmp/tmp.Mrm1807o7P command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.SW6TLXLWQB +++ cat /tmp/tmp.Mrm1807o7P command terminated with exit code 1 +++ rm /tmp/tmp.SW6TLXLWQB /tmp/tmp.Mrm1807o7P +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/835799cc-c9fc-4951-bd76-1edb86881412 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.V800T1WLa6 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.M8BsXsWQKs +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.V800T1WLa6 +++++ cat /tmp/tmp.M8BsXsWQKs Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.V800T1WLa6 +++++ cat /tmp/tmp.M8BsXsWQKs Error from server (NotFound): 
perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.V800T1WLa6 +++++ cat /tmp/tmp.M8BsXsWQKs Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.V800T1WLa6 +++++ cat /tmp/tmp.M8BsXsWQKs Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.V800T1WLa6 /tmp/tmp.M8BsXsWQKs +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.KzWJrgzixU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3njPHLDnUi +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.KzWJrgzixU +++ cat /tmp/tmp.3njPHLDnUi command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.KzWJrgzixU +++ cat /tmp/tmp.3njPHLDnUi command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.KzWJrgzixU +++ cat /tmp/tmp.3njPHLDnUi command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.KzWJrgzixU +++ cat /tmp/tmp.3njPHLDnUi command terminated with exit code 1 +++ rm /tmp/tmp.KzWJrgzixU /tmp/tmp.3njPHLDnUi +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/0cf881b7-0ef8-44f5-899d-11d433365fa6 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.weR7CVHQf0 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.tdchv7XVHN +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.weR7CVHQf0 +++++ cat /tmp/tmp.tdchv7XVHN Error from 
server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.weR7CVHQf0 +++++ cat /tmp/tmp.tdchv7XVHN Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.weR7CVHQf0 +++++ cat /tmp/tmp.tdchv7XVHN Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.weR7CVHQf0 +++++ cat /tmp/tmp.tdchv7XVHN Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.weR7CVHQf0 /tmp/tmp.tdchv7XVHN +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UFJayN0JrW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.8o4QmwZVuG +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.UFJayN0JrW +++ cat /tmp/tmp.8o4QmwZVuG command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.UFJayN0JrW +++ cat /tmp/tmp.8o4QmwZVuG command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.UFJayN0JrW +++ cat /tmp/tmp.8o4QmwZVuG command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.UFJayN0JrW +++ cat /tmp/tmp.8o4QmwZVuG command terminated with exit code 1 +++ rm /tmp/tmp.UFJayN0JrW /tmp/tmp.8o4QmwZVuG +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++ grep /node_id/0453758d-caad-4687-a25d-2ffd3a27bdb3 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ awk '{print $4}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.e8tQe1jaBz 
++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.D3WNmELZPL +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.e8tQe1jaBz +++++ cat /tmp/tmp.D3WNmELZPL Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.e8tQe1jaBz +++++ cat /tmp/tmp.D3WNmELZPL Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.e8tQe1jaBz +++++ cat /tmp/tmp.D3WNmELZPL Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.e8tQe1jaBz +++++ cat /tmp/tmp.D3WNmELZPL Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.e8tQe1jaBz /tmp/tmp.D3WNmELZPL +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6tH8R6yK2P ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0vpISHo2jD +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.6tH8R6yK2P +++ cat /tmp/tmp.0vpISHo2jD command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.6tH8R6yK2P +++ cat /tmp/tmp.0vpISHo2jD command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.6tH8R6yK2P +++ cat /tmp/tmp.0vpISHo2jD command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.6tH8R6yK2P +++ cat /tmp/tmp.0vpISHo2jD command terminated with exit code 1 +++ rm /tmp/tmp.6tH8R6yK2P /tmp/tmp.0vpISHo2jD +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes 
--node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/c1b33882-129b-4246-8d0e-9078fbb9660b +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.LQFiuLBU53 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.N53yH0tyCU +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.LQFiuLBU53 +++++ cat /tmp/tmp.N53yH0tyCU Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.LQFiuLBU53 +++++ cat /tmp/tmp.N53yH0tyCU Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.LQFiuLBU53 +++++ cat /tmp/tmp.N53yH0tyCU Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.LQFiuLBU53 +++++ cat /tmp/tmp.N53yH0tyCU Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.LQFiuLBU53 /tmp/tmp.N53yH0tyCU +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nb3uudOaQ3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.hfMCIn3QTk +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.nb3uudOaQ3 +++ cat /tmp/tmp.hfMCIn3QTk command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.nb3uudOaQ3 +++ cat /tmp/tmp.hfMCIn3QTk command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.nb3uudOaQ3 +++ cat /tmp/tmp.hfMCIn3QTk command terminated with exit code 1 +++ sleep 8 +++ 
cat /tmp/tmp.nb3uudOaQ3 +++ cat /tmp/tmp.hfMCIn3QTk command terminated with exit code 1 +++ rm /tmp/tmp.nb3uudOaQ3 /tmp/tmp.hfMCIn3QTk +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/c80bc7c9-0e84-425a-8d2e-074127673580 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.qWiiw1H1s9 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.58DANsTPZ7 +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.qWiiw1H1s9 +++++ cat /tmp/tmp.58DANsTPZ7 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.qWiiw1H1s9 +++++ cat /tmp/tmp.58DANsTPZ7 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.qWiiw1H1s9 +++++ cat /tmp/tmp.58DANsTPZ7 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.qWiiw1H1s9 +++++ cat /tmp/tmp.58DANsTPZ7 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.qWiiw1H1s9 /tmp/tmp.58DANsTPZ7 +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.37NDhexKiv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1oOteb96Qv +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.37NDhexKiv +++ cat /tmp/tmp.1oOteb96Qv command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.37NDhexKiv +++ cat /tmp/tmp.1oOteb96Qv command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ 
set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.37NDhexKiv +++ cat /tmp/tmp.1oOteb96Qv command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.37NDhexKiv +++ cat /tmp/tmp.1oOteb96Qv command terminated with exit code 1 +++ rm /tmp/tmp.37NDhexKiv /tmp/tmp.1oOteb96Qv +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/b0621f8e-75ea-4a81-a9cf-f4f0ee247cc9 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.Hld6nVWkRQ ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.Wq4HhepBuo +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.Hld6nVWkRQ +++++ cat /tmp/tmp.Wq4HhepBuo Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.Hld6nVWkRQ +++++ cat /tmp/tmp.Wq4HhepBuo Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.Hld6nVWkRQ +++++ cat /tmp/tmp.Wq4HhepBuo Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.Hld6nVWkRQ +++++ cat /tmp/tmp.Wq4HhepBuo Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.Hld6nVWkRQ /tmp/tmp.Wq4HhepBuo +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.w8eSzC4pzO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MHMvzYJydq +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.w8eSzC4pzO +++ cat /tmp/tmp.MHMvzYJydq command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec 
-n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.w8eSzC4pzO +++ cat /tmp/tmp.MHMvzYJydq command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-3820 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.w8eSzC4pzO +++ cat /tmp/tmp.MHMvzYJydq command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.w8eSzC4pzO +++ cat /tmp/tmp.MHMvzYJydq command terminated with exit code 1 +++ rm /tmp/tmp.w8eSzC4pzO /tmp/tmp.MHMvzYJydq +++ return 1 ++ echo + [[ -n '' ]] ++ kubectl_bin logs monitoring-rs0-0 pmm-client ++ grep -c 'cannot auto discover databases and collections' +++ mktemp ++ local LAST_OUT=/tmp/tmp.QCiJ2QNNKV +++ mktemp ++ local LAST_ERR=/tmp/tmp.hiDojBKCwM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl logs monitoring-rs0-0 pmm-client ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.QCiJ2QNNKV ++ cat /tmp/tmp.hiDojBKCwM error: error from server (NotFound): pods "monitoring-rs0-0" not found in namespace "monitoring-2-0-3820" ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl logs monitoring-rs0-0 pmm-client ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.QCiJ2QNNKV ++ cat /tmp/tmp.hiDojBKCwM error: error from server (NotFound): pods "monitoring-rs0-0" not found in namespace "monitoring-2-0-3820" ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl logs monitoring-rs0-0 pmm-client ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.QCiJ2QNNKV ++ cat /tmp/tmp.hiDojBKCwM error: error from server (NotFound): pods "monitoring-rs0-0" not found in namespace "monitoring-2-0-3820" ++ sleep 8 ++ cat /tmp/tmp.QCiJ2QNNKV ++ cat /tmp/tmp.hiDojBKCwM error: error from server (NotFound): pods "monitoring-rs0-0" not found in namespace "monitoring-2-0-3820" ++ rm /tmp/tmp.QCiJ2QNNKV /tmp/tmp.hiDojBKCwM ++ return 1 + [[ 0 != 0 ]] + desc 'check for passwords leak' + set +o xtrace ----------------------------------------------------------------------------------- check for passwords leak ----------------------------------------------------------------------------------- + check_passwords_leak + local secrets + local passwords + local pods ++ kubectl_bin get secrets -o json ++ jq -r '.items[].data | to_entries | .[] | select(.key | (contains("_PASSWORD"))) | .value' +++ mktemp ++ local LAST_OUT=/tmp/tmp.leLBIYqlEU +++ mktemp ++ local LAST_ERR=/tmp/tmp.tjPt1192Bn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.leLBIYqlEU ++ cat /tmp/tmp.tjPt1192Bn ++ rm /tmp/tmp.leLBIYqlEU /tmp/tmp.tjPt1192Bn ++ return 0 + secrets='YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2' + echo secrets=YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== 
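The recurring mktemp / 'seq 0 2' / sleep 0-4-8 blocks throughout this log come from the suite's kubectl retry wrapper. A minimal sketch of what kubectl_bin appears to do, reconstructed from the traced statements; variable handling is simplified and the real helper may differ in details:

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status i
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    local timeout=4
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            cat "$LAST_OUT"
            cat "$LAST_ERR"
            sleep $((timeout * i))   # observed back-off: sleep 0, 4, 8
        else
            break
        fi
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR"
    rm -f "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}

Three failed attempts against an already-deleted cluster are therefore expected to take roughly 12 seconds of sleeps before the caller sees return 1, which is what the repeated "command terminated with exit code 1" blocks above show.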
dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 secrets=YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo + passwords='backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2' + echo passwords=backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 passwords=backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 ++ kubectl_bin get pods -o name ++ awk -F / '{print $2}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ymdTpWJBcr +++ mktemp ++ local LAST_ERR=/tmp/tmp.BNw2No7EOn ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods -o name ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ymdTpWJBcr ++ cat /tmp/tmp.BNw2No7EOn ++ rm /tmp/tmp.ymdTpWJBcr /tmp/tmp.BNw2No7EOn ++ return 0 + pods='monitoring-0 psmdb-client-6cd48df8b6-8r8jz' + echo pods=monitoring-0 psmdb-client-6cd48df8b6-8r8jz pods=monitoring-0 psmdb-client-6cd48df8b6-8r8jz + collect_logs monitoring-2-0-3820 + local containers + local count + NS=monitoring-2-0-3820 + for p in '$pods' ++ kubectl_bin -n monitoring-2-0-3820 get pod monitoring-0 -o 'jsonpath={.spec.containers[*].name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oXO0T8maS9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.zDCgeNk2kj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl -n monitoring-2-0-3820 get pod monitoring-0 -o 'jsonpath={.spec.containers[*].name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.oXO0T8maS9 ++ cat /tmp/tmp.zDCgeNk2kj ++ rm /tmp/tmp.oXO0T8maS9 /tmp/tmp.zDCgeNk2kj ++ return 0 + containers=monitoring + for c in '$containers' + [[ monitoring =~ pmm ]] + kubectl_bin -n monitoring-2-0-3820 logs monitoring-0 -c monitoring ++ 
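check_passwords_leak, entered above, first builds its list of candidate strings. A rough sketch of that assembly, matching the jq filter and base64 decoding shown in the trace; the loop structure is an assumption, but note that the resulting list deliberately keeps both the decoded and the raw base64 form of every *_PASSWORD secret value:

secrets=$(kubectl get secrets -o json \
    | jq -r '.items[].data | to_entries | .[]
             | select(.key | contains("_PASSWORD")) | .value')

passwords=""
for i in $secrets; do
    # decode each base64 value; a leak of either form should be caught
    passwords="$passwords $(echo "$i" | base64 -d)"
done
passwords="$passwords $secrets"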
mktemp + local LAST_OUT=/tmp/tmp.kdMTtVjGri ++ mktemp + local LAST_ERR=/tmp/tmp.TashTgF7ha + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n monitoring-2-0-3820 logs monitoring-0 -c monitoring + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kdMTtVjGri + cat /tmp/tmp.TashTgF7ha + rm /tmp/tmp.kdMTtVjGri /tmp/tmp.TashTgF7ha + return 0 + echo logs saved in: /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt logs saved in: /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt ++ 
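collect_logs, traced above for the monitoring-0 pod, saves every container's log to a file before the grep pass (see the leak-assertion sketch further below). A condensed sketch; the function name, file naming, and namespace argument follow the trace, while tmp_dir (standing in for the /tmp/tmp.B2lWcAgif4 directory used in this run) and the handling of containers whose name matches "pmm" are assumptions:

collect_logs() {
    local NS=$1
    local p c containers
    for p in $pods; do
        containers=$(kubectl -n "$NS" get pod "$p" \
            -o 'jsonpath={.spec.containers[*].name}')
        for c in $containers; do
            if [[ $c =~ pmm ]]; then
                continue   # assumption: pmm containers handled separately
            fi
            kubectl -n "$NS" logs "$p" -c "$c" \
                > "$tmp_dir/logs_output-$p-$c.txt"
            echo "logs saved in: $tmp_dir/logs_output-$p-$c.txt"
            # each saved file is then grepped for every password
        done
    done
}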
: + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.B2lWcAgif4/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + echo + for p in '$pods' ++ kubectl_bin -n monitoring-2-0-3820 get pod psmdb-client-6cd48df8b6-8r8jz -o 'jsonpath={.spec.containers[*].name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ugmrrHdqF8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.j53nYULluJ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl -n monitoring-2-0-3820 get pod psmdb-client-6cd48df8b6-8r8jz -o 'jsonpath={.spec.containers[*].name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ugmrrHdqF8 ++ cat /tmp/tmp.j53nYULluJ ++ rm /tmp/tmp.ugmrrHdqF8 /tmp/tmp.j53nYULluJ ++ return 0 + containers=psmdb-client + for c in '$containers' + [[ psmdb-client =~ pmm ]] + kubectl_bin -n monitoring-2-0-3820 logs psmdb-client-6cd48df8b6-8r8jz -c psmdb-client ++ mktemp + local LAST_OUT=/tmp/tmp.5svPNaBWKA ++ mktemp + local LAST_ERR=/tmp/tmp.BcOkuDIRxG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n monitoring-2-0-3820 logs psmdb-client-6cd48df8b6-8r8jz -c psmdb-client + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5svPNaBWKA + cat /tmp/tmp.BcOkuDIRxG + rm /tmp/tmp.5svPNaBWKA /tmp/tmp.BcOkuDIRxG + return 0 + echo logs saved in: /tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt logs saved in: /tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 
/tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.B2lWcAgif4/logs_output-psmdb-client-6cd48df8b6-8r8jz-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + echo + '[' -n psmdb-operator ']' ++ awk -F / '{print $2}' ++ kubectl_bin -n psmdb-operator get pods -o name +++ mktemp ++ local LAST_OUT=/tmp/tmp.4MJhhsuw1i +++ mktemp ++ local LAST_ERR=/tmp/tmp.XIsvoFPGjy ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl -n psmdb-operator get pods -o name ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.4MJhhsuw1i ++ cat /tmp/tmp.XIsvoFPGjy ++ rm /tmp/tmp.4MJhhsuw1i /tmp/tmp.XIsvoFPGjy ++ return 0 + pods=percona-server-mongodb-operator-66947885b6-27mxp + collect_logs psmdb-operator + local containers + local count + NS=psmdb-operator + for p in '$pods' ++ kubectl_bin -n psmdb-operator get pod percona-server-mongodb-operator-66947885b6-27mxp -o 'jsonpath={.spec.containers[*].name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Op5IJ4ik2y +++ mktemp ++ local LAST_ERR=/tmp/tmp.RZDbA842Jx ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl -n psmdb-operator get pod percona-server-mongodb-operator-66947885b6-27mxp -o 'jsonpath={.spec.containers[*].name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Op5IJ4ik2y ++ cat /tmp/tmp.RZDbA842Jx ++ rm /tmp/tmp.Op5IJ4ik2y /tmp/tmp.RZDbA842Jx ++ return 0 + containers=percona-server-mongodb-operator + for c in '$containers' + [[ percona-server-mongodb-operator =~ pmm ]] + kubectl_bin -n psmdb-operator logs percona-server-mongodb-operator-66947885b6-27mxp -c percona-server-mongodb-operator ++ 
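The long runs of grep -c lines above and below are the leak assertion itself, applied per saved log file to every entry in $passwords. A minimal sketch, equivalent to the per-file loops in the trace; the action taken on a hit is an assumption, since every count in this run stays 0:

for logfile in "$tmp_dir"/logs_output-*.txt; do
    for pass in $passwords; do
        # '|| :' keeps 'set -e' from aborting when grep finds nothing
        count=$(grep -c --fixed-strings -- "$pass" "$logfile" || :)
        if [[ $count != 0 ]]; then
            echo "password '$pass' leaked into $logfile ($count occurrences)"
            exit 1
        fi
    done
done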
mktemp + local LAST_OUT=/tmp/tmp.OliOe6zoGm ++ mktemp + local LAST_ERR=/tmp/tmp.Ihxv3PHURB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n psmdb-operator logs percona-server-mongodb-operator-66947885b6-27mxp -c percona-server-mongodb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OliOe6zoGm + cat /tmp/tmp.Ihxv3PHURB + rm /tmp/tmp.OliOe6zoGm /tmp/tmp.Ihxv3PHURB + return 0 + echo logs saved in: /tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt logs saved in: /tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= 
/tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.B2lWcAgif4/logs_output-percona-server-mongodb-operator-66947885b6-27mxp-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + echo + helm uninstall monitoring release "monitoring" uninstalled + destroy monitoring-2-0-3820 + local namespace=monitoring-2-0-3820 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.PCeoZUAaS3 ++ mktemp + local LAST_ERR=/tmp/tmp.ifc9qOKq2j + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.PCeoZUAaS3 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.ifc9qOKq2j + rm /tmp/tmp.PCeoZUAaS3 /tmp/tmp.ifc9qOKq2j + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' 
"${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.ipCML15OSd ++ mktemp + local LAST_ERR=/tmp/tmp.4WB5KiOzjL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ipCML15OSd + cat /tmp/tmp.4WB5KiOzjL + rm /tmp/tmp.ipCML15OSd /tmp/tmp.4WB5KiOzjL + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.b7asAD5viY ++ mktemp + local LAST_ERR=/tmp/tmp.w45z6pcyJv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.b7asAD5viY + cat /tmp/tmp.w45z6pcyJv + rm /tmp/tmp.b7asAD5viY /tmp/tmp.w45z6pcyJv + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.nz0O9a3d2c ++ mktemp + local LAST_ERR=/tmp/tmp.RzDhLZJsuJ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nz0O9a3d2c + cat /tmp/tmp.RzDhLZJsuJ + rm /tmp/tmp.nz0O9a3d2c /tmp/tmp.RzDhLZJsuJ + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local 
LAST_OUT=/tmp/tmp.Oz4iVeRBuK ++ mktemp + local LAST_ERR=/tmp/tmp.m1skKcfjq3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1582/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Oz4iVeRBuK clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.m1skKcfjq3 + rm /tmp/tmp.Oz4iVeRBuK /tmp/tmp.m1skKcfjq3 + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.R1z80dH7HW ++ mktemp + local LAST_ERR=/tmp/tmp.j6q9HLujnH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.R1z80dH7HW namespace "cert-manager" deleted customresourcedefinition.apiextensions.k8s.io "certificaterequests.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificates.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "challenges.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "clusterissuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "orders.acme.cert-manager.io" deleted serviceaccount "cert-manager-cainjector" deleted serviceaccount "cert-manager" deleted serviceaccount "cert-manager-webhook" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cluster-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrolebinding.rbac.authorization.k8s.io 
"cert-manager-controller-approve:cert-manager-io" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted + cat /tmp/tmp.j6q9HLujnH Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.R1z80dH7HW namespace "cert-manager" deleted + cat /tmp/tmp.j6q9HLujnH Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": 
customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io 
"cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io 
"cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.R1z80dH7HW + cat /tmp/tmp.j6q9HLujnH Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.R1z80dH7HW + cat /tmp/tmp.j6q9HLujnH Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.R1z80dH7HW /tmp/tmp.j6q9HLujnH + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + rm -rf /tmp/tmp.B2lWcAgif4 + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator + kubectl_bin delete --grace-period=0 --force=true namespace monitoring-2-0-3820 ++ mktemp + desc 'test passed' + local LAST_OUT=/tmp/tmp.DXJDbe4Xzl + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ mktemp + local LAST_ERR=/tmp/tmp.Q1qWiZL6gp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace monitoring-2-0-3820 ++ mktemp + local LAST_OUT=/tmp/tmp.Y3os6xRMR9 ++ mktemp + local LAST_ERR=/tmp/tmp.0lmRSb7M25 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator