Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/logs/monitoring-2-0.log
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1
+ create_infra monitoring-2-0-19620
+ local ns=monitoring-2-0-19620
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/deploy/crd.yaml --ignore-not-found --wait=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.V7shKMHNmn
++ mktemp
+ local LAST_ERR=/tmp/tmp.jAfHU6dlMD
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/deploy/crd.yaml --ignore-not-found --wait=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.V7shKMHNmn
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted
customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted
+ cat /tmp/tmp.jAfHU6dlMD
+ rm /tmp/tmp.V7shKMHNmn /tmp/tmp.jAfHU6dlMD
+ return 0
++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/deploy/crd.yaml
++ grep -v '\-\-\-'
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbbackups"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.5UKLYuNXE6
++ mktemp
+ local LAST_ERR=/tmp/tmp.uA8Erskxvc
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.5UKLYuNXE6
+ cat /tmp/tmp.uA8Erskxvc
+ rm /tmp/tmp.5UKLYuNXE6 /tmp/tmp.uA8Erskxvc
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbrestores"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.ulGqgzoSTE
++ mktemp
+ local LAST_ERR=/tmp/tmp.tNQf5HM7kQ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ulGqgzoSTE
+ cat /tmp/tmp.tNQf5HM7kQ
+ rm /tmp/tmp.ulGqgzoSTE /tmp/tmp.tNQf5HM7kQ
+ return 0
+ for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')'
+ kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide
+ grep -v NAMESPACE
+ xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
error: the server doesn't have a resource type "perconaservermongodbs"
+ kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}'
error: the server doesn't have a resource type "perconaservermongodbs"
+ :
+ kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com
++ mktemp
+ local LAST_OUT=/tmp/tmp.SSWrBkZd0U
++ mktemp
+ local LAST_ERR=/tmp/tmp.XhuVAWk2hB
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.SSWrBkZd0U
+ cat /tmp/tmp.XhuVAWk2hB
+ rm /tmp/tmp.SSWrBkZd0U /tmp/tmp.XhuVAWk2hB
+ return 0
+ local rbac_yaml=rbac.yaml
+ '[' -n psmdb-operator ']'
+ rbac_yaml=cw-rbac.yaml
+ kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/deploy/cw-rbac.yaml --ignore-not-found
++ mktemp
+ local LAST_OUT=/tmp/tmp.u83ZUex1QQ
++ mktemp
+ local LAST_ERR=/tmp/tmp.b6WyMFmhHC
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.u83ZUex1QQ
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.b6WyMFmhHC
+ rm /tmp/tmp.u83ZUex1QQ /tmp/tmp.b6WyMFmhHC
+ return 0
+ check_crd_for_deletion PR-1574-f53081a3
+ local git_tag=PR-1574-f53081a3
++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1574-f53081a3/deploy/crd.yaml
++ yq eval .metadata.name
++ /usr/bin/sed s/---//g
++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g'
+ for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')'
++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.zG7hSaf8GQ
+++ mktemp
++ local LAST_ERR=/tmp/tmp.rAaXoWcqeJ
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.zG7hSaf8GQ
++ cat /tmp/tmp.rAaXoWcqeJ
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 0
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.zG7hSaf8GQ
++ cat /tmp/tmp.rAaXoWcqeJ
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 4
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}'
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.zG7hSaf8GQ
++ cat /tmp/tmp.rAaXoWcqeJ
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ sleep 8
++ cat /tmp/tmp.zG7hSaf8GQ
++ cat /tmp/tmp.rAaXoWcqeJ
Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found
++ rm /tmp/tmp.zG7hSaf8GQ /tmp/tmp.rAaXoWcqeJ
++ return 1
+ [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]]
+ '[' -n psmdb-operator ']'
+ create_namespace psmdb-operator
+ local namespace=psmdb-operator
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ awk '-F ' '{print $2}'
++ sed s/NAMESPACE//
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
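Note: destroy_chaos_mesh computes a (possibly empty) list of chaos-mesh object names and pipes it straight into kubectl delete, so every "resource(s) were provided, but no name was specified" above is an expected no-op swallowed by "+ :". A guarded variant (a hypothetical helper, not what the suite runs) would keep the log quiet:

    delete_matching() {
        local kind=$1 pattern=$2 names
        names=$(kubectl get "$kind" 2>/dev/null | grep "$pattern" | awk '{print $1}')
        # Skip the delete entirely when nothing matched; word-splitting of
        # $names is intentional so each name becomes its own argument.
        [ -n "$names" ] && timeout 30 kubectl delete "$kind" $names
        return 0
    }
    delete_matching MutatingWebhookConfiguration chaos-mesh
    delete_matching ValidatingWebhookConfiguration chaos-mesh
    delete_matching crd chaos-mesh.org
    delete_matching clusterrolebinding chaos-mesh
    delete_matching clusterrole chaos-mesh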
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ kubectl_bin get ns
+ egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME'
+ awk '{print$1}'
+ '[' -n '' ']'
+ xargs kubectl delete ns
+ desc 'cleaned up old namespaces psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace psmdb-operator --ignore-not-found
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.Hy5XGfZ96p
+ local LAST_OUT=/tmp/tmp.ysjxkwReiS
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.1tTOLqSlfY
+ local LAST_ERR=/tmp/tmp.yhI8kFnVLj
+ local exit_status=0
+ local exit_status=0
+ local timeout=4
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace psmdb-operator --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.Hy5XGfZ96p
+ cat /tmp/tmp.1tTOLqSlfY
+ rm /tmp/tmp.Hy5XGfZ96p /tmp/tmp.1tTOLqSlfY
+ return 0
namespace "cert-manager" deleted
namespace "monitoring-2-0-17219" deleted
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ysjxkwReiS
namespace "psmdb-operator" deleted
+ cat /tmp/tmp.yhI8kFnVLj
+ rm /tmp/tmp.ysjxkwReiS /tmp/tmp.yhI8kFnVLj
+ return 0
+ kubectl_bin wait --for=delete namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.ifVi5B3u37
++ mktemp
+ local LAST_ERR=/tmp/tmp.ev2PGLtSUs
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ifVi5B3u37
+ cat /tmp/tmp.ev2PGLtSUs
+ rm /tmp/tmp.ifVi5B3u37 /tmp/tmp.ev2PGLtSUs
+ return 0
+ desc 'create namespace psmdb-operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace psmdb-operator
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.BUs9dmq44w
++ mktemp
+ local LAST_ERR=/tmp/tmp.j3hTDaHHJk
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.BUs9dmq44w
namespace/psmdb-operator created
+ cat /tmp/tmp.j3hTDaHHJk
+ rm /tmp/tmp.BUs9dmq44w /tmp/tmp.j3hTDaHHJk
+ return 0
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.I0vhnHcOIi
+++ mktemp
++ local LAST_ERR=/tmp/tmp.HLNG2L3vLC
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.I0vhnHcOIi
++ cat /tmp/tmp.HLNG2L3vLC
++ rm /tmp/tmp.I0vhnHcOIi /tmp/tmp.HLNG2L3vLC
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1574-f53081a3-6-cluster1 --namespace=psmdb-operator
++ mktemp
+ local LAST_OUT=/tmp/tmp.vrqxLah7Hm
++ mktemp
+ local LAST_ERR=/tmp/tmp.FBp5THkZXQ
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1574-f53081a3-6-cluster1 --namespace=psmdb-operator
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.vrqxLah7Hm
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1574-f53081a3-6-cluster1" modified.
+ cat /tmp/tmp.FBp5THkZXQ
+ rm /tmp/tmp.vrqxLah7Hm /tmp/tmp.FBp5THkZXQ
+ return 0
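Note: every kubectl_bin call in this log expands to the same boilerplate: two mktemp files, up to three attempts with sleeps of 0, 4, and 8 seconds, then a cat of the captured stdout/stderr and cleanup. A condensed reconstruction from the trace (close to, but not guaranteed identical to, the suite's real helper):

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 -a -n "$1" ]; then
                # Failed attempt: show what happened, back off, retry.
                cat "$LAST_OUT"
                cat "$LAST_ERR"
                sleep $((timeout * i))   # the trace shows sleep 0, sleep 4, sleep 8
            else
                break
            fi
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }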
+ deploy_operator
+ desc 'start PSMDB operator'
+ set +o xtrace
-----------------------------------------------------------------------------------
start PSMDB operator
-----------------------------------------------------------------------------------
+ local cr_file
+ '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/conf/crd.yaml ']'
+ cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/deploy/crd.yaml
+ kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/deploy/crd.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.vzSnQHqBX8
++ mktemp
+ local LAST_ERR=/tmp/tmp.WvFqKQ9M7a
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/deploy/crd.yaml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.vzSnQHqBX8
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied
customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied
+ cat /tmp/tmp.WvFqKQ9M7a
+ rm /tmp/tmp.vzSnQHqBX8 /tmp/tmp.WvFqKQ9M7a
+ return 0
+ '[' -n psmdb-operator ']'
+ apply_rbac cw-rbac
+ local operator_namespace=psmdb-operator
+ local rbac=cw-rbac
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/deploy/cw-rbac.yaml
+ sed -e 's^namespace: .*^namespace: psmdb-operator^'
+ kubectl_bin apply -n psmdb-operator -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.wNIZZssG3F
++ mktemp
+ local LAST_ERR=/tmp/tmp.CG2UBAIe7Q
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -n psmdb-operator -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.wNIZZssG3F
clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created
serviceaccount/percona-server-mongodb-operator created
clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created
+ cat /tmp/tmp.CG2UBAIe7Q
+ rm /tmp/tmp.wNIZZssG3F /tmp/tmp.CG2UBAIe7Q
+ return 0
+ kubectl_bin apply -f -
+ yq eval '
    (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1574-f53081a3") |
    ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") |
    ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/deploy/cw-operator.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.bBQrpusZHO
++ mktemp
+ local LAST_ERR=/tmp/tmp.rC2058ggfg
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.bBQrpusZHO
deployment.apps/percona-server-mongodb-operator created
+ cat /tmp/tmp.rC2058ggfg
+ rm /tmp/tmp.bBQrpusZHO /tmp/tmp.rC2058ggfg
+ return 0
+ sleep 2
++ get_operator_pod
++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
+++ mktemp
++ local LAST_OUT=/tmp/tmp.rVQsqHdJud
+++ mktemp
++ local LAST_ERR=/tmp/tmp.sHqFuk0x98
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.rVQsqHdJud
++ cat /tmp/tmp.sHqFuk0x98
++ rm /tmp/tmp.rVQsqHdJud /tmp/tmp.sHqFuk0x98
++ return 0
+ wait_pod percona-server-mongodb-operator-599f484889-2snv5
+ local pod=percona-server-mongodb-operator-599f484889-2snv5
+ set +o xtrace
waiting for pod/percona-server-mongodb-operator-599f484889-2snv5 to be ready.OK
+ create_namespace monitoring-2-0-19620
+ local namespace=monitoring-2-0-19620
+ local skip_clean_namespace=
+ [[ 1 == 1 ]]
+ [[ -z '' ]]
+ destroy_chaos_mesh
++ helm list --all-namespaces --filter chaos-mesh
++ tail -n1
++ sed s/NAMESPACE//
++ awk '-F ' '{print $2}'
+ local chaos_mesh_ns=
+ desc 'destroy chaos-mesh'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy chaos-mesh
-----------------------------------------------------------------------------------
+ '[' -n '' ']'
++ kubectl get MutatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete MutatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get ValidatingWebhookConfiguration
++ grep validate-auth
++ awk '{print $1}'
+ timeout 30 kubectl delete ValidatingWebhookConfiguration
error: resource(s) were provided, but no name was specified
+ :
++ kubectl api-resources
++ grep chaos-mesh
++ awk '{print $1}'
++ kubectl get crd
++ grep chaos-mesh.org
++ awk '{print $1}'
+ timeout 30 kubectl delete crd
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrolebinding
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrolebinding
error: resource(s) were provided, but no name was specified
+ :
++ kubectl get clusterrole
++ grep chaos-mesh
++ awk '{print $1}'
+ timeout 30 kubectl delete clusterrole
error: resource(s) were provided, but no name was specified
+ :
+ desc 'cleaned up all old namespaces'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up all old namespaces
-----------------------------------------------------------------------------------
+ egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME'
+ awk '{print$1}'
+ '[' -n '' ']'
+ xargs kubectl delete ns
+ desc 'cleaned up old namespaces monitoring-2-0-19620'
+ set +o xtrace
-----------------------------------------------------------------------------------
cleaned up old namespaces monitoring-2-0-19620
-----------------------------------------------------------------------------------
+ kubectl_bin delete namespace monitoring-2-0-19620 --ignore-not-found
+ kubectl_bin get ns
++ mktemp
++ mktemp
+ local LAST_OUT=/tmp/tmp.wamKsXPfQC
+ local LAST_OUT=/tmp/tmp.aJPRYWSDY7
++ mktemp
++ mktemp
+ local LAST_ERR=/tmp/tmp.HiJK73Ld2L
+ local exit_status=0
+ local timeout=4
+ local LAST_ERR=/tmp/tmp.yBO6YyiCCr
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get ns
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete namespace monitoring-2-0-19620 --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.wamKsXPfQC
+ cat /tmp/tmp.HiJK73Ld2L
+ rm /tmp/tmp.wamKsXPfQC /tmp/tmp.HiJK73Ld2L
+ return 0
error: resource(s) were provided, but no name was specified
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.aJPRYWSDY7
+ cat /tmp/tmp.yBO6YyiCCr
+ rm /tmp/tmp.aJPRYWSDY7 /tmp/tmp.yBO6YyiCCr
+ return 0
+ kubectl_bin wait --for=delete namespace monitoring-2-0-19620
++ mktemp
+ local LAST_OUT=/tmp/tmp.rqrz8rJ9r5
++ mktemp
+ local LAST_ERR=/tmp/tmp.s8KyYFvn7n
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl wait --for=delete namespace monitoring-2-0-19620
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.rqrz8rJ9r5
+ cat /tmp/tmp.s8KyYFvn7n
+ rm /tmp/tmp.rqrz8rJ9r5 /tmp/tmp.s8KyYFvn7n
+ return 0
+ desc 'create namespace monitoring-2-0-19620'
+ set +o xtrace
-----------------------------------------------------------------------------------
create namespace monitoring-2-0-19620
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace monitoring-2-0-19620
++ mktemp
+ local LAST_OUT=/tmp/tmp.jAeuj9Bgp6
++ mktemp
+ local LAST_ERR=/tmp/tmp.BBvEHCS9bu
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace monitoring-2-0-19620
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.jAeuj9Bgp6
namespace/monitoring-2-0-19620 created
+ cat /tmp/tmp.BBvEHCS9bu
+ rm /tmp/tmp.jAeuj9Bgp6 /tmp/tmp.BBvEHCS9bu
+ return 0
++ kubectl_bin config current-context
+++ mktemp
++ local LAST_OUT=/tmp/tmp.30wgaQK48h
+++ mktemp
++ local LAST_ERR=/tmp/tmp.F8wX6VnnfD
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl config current-context
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.30wgaQK48h
++ cat /tmp/tmp.F8wX6VnnfD
++ rm /tmp/tmp.30wgaQK48h /tmp/tmp.F8wX6VnnfD
++ return 0
+ kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1574-f53081a3-6-cluster1 --namespace=monitoring-2-0-19620
++ mktemp
+ local LAST_OUT=/tmp/tmp.O52vpZkPxx
++ mktemp
+ local LAST_ERR=/tmp/tmp.LJQj2arlVP
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1574-f53081a3-6-cluster1 --namespace=monitoring-2-0-19620
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.O52vpZkPxx
Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1574-f53081a3-6-cluster1" modified.
+ cat /tmp/tmp.LJQj2arlVP
+ rm /tmp/tmp.O52vpZkPxx /tmp/tmp.LJQj2arlVP
+ return 0
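Note: namespace setup is deliberately idempotent so a rerun on a dirty cluster still starts clean: delete with --ignore-not-found, wait for the delete to finish, recreate, and repoint the kubeconfig context. Condensed from the trace:

    ns=monitoring-2-0-19620
    kubectl delete namespace "$ns" --ignore-not-found
    kubectl wait --for=delete namespace "$ns"
    kubectl create namespace "$ns"
    # Make the new namespace the default for every following kubectl call.
    kubectl config set-context "$(kubectl config current-context)" --namespace="$ns"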
+ deploy_cert_manager
+ desc 'deploy cert manager'
+ set +o xtrace
-----------------------------------------------------------------------------------
deploy cert manager
-----------------------------------------------------------------------------------
+ kubectl_bin create namespace cert-manager
++ mktemp
+ local LAST_OUT=/tmp/tmp.dgzvbdZ8K4
++ mktemp
+ local LAST_ERR=/tmp/tmp.72Bvc1sXAP
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl create namespace cert-manager
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.dgzvbdZ8K4
namespace/cert-manager created
+ cat /tmp/tmp.72Bvc1sXAP
+ rm /tmp/tmp.dgzvbdZ8K4 /tmp/tmp.72Bvc1sXAP
+ return 0
+ kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true
++ mktemp
+ local LAST_OUT=/tmp/tmp.3cTlvSgnDK
++ mktemp
+ local LAST_ERR=/tmp/tmp.v9qyqKoqSy
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.3cTlvSgnDK
namespace/cert-manager labeled
+ cat /tmp/tmp.v9qyqKoqSy
+ rm /tmp/tmp.3cTlvSgnDK /tmp/tmp.v9qyqKoqSy
+ return 0
+ kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml --validate=false
++ mktemp
+ local LAST_OUT=/tmp/tmp.7cPvjlp7FD
++ mktemp
+ local LAST_ERR=/tmp/tmp.EFe5JfRz3k
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml --validate=false
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.7cPvjlp7FD
namespace/cert-manager configured
customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged
customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged
serviceaccount/cert-manager-cainjector created
serviceaccount/cert-manager created
serviceaccount/cert-manager-webhook created
clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged
clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged
clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews configured
role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged
role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged
role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created
rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged
rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection configured
rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created
service/cert-manager created
service/cert-manager-webhook created
deployment.apps/cert-manager-cainjector created
deployment.apps/cert-manager created
deployment.apps/cert-manager-webhook created
mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured
validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured
+ cat /tmp/tmp.EFe5JfRz3k
Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
+ rm /tmp/tmp.7cPvjlp7FD /tmp/tmp.EFe5JfRz3k
+ return 0
+ kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready
++ mktemp
+ local LAST_OUT=/tmp/tmp.zGXmxwHPw7
++ mktemp
+ local LAST_ERR=/tmp/tmp.PdAT4jSSro
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.zGXmxwHPw7
pod/cert-manager-5658d944df-wsjxk condition met
pod/cert-manager-cainjector-cb99ff845-lqkfn condition met
pod/cert-manager-webhook-7fd74b8dc7-nbr7v condition met
+ cat /tmp/tmp.PdAT4jSSro
+ rm /tmp/tmp.zGXmxwHPw7 /tmp/tmp.PdAT4jSSro
+ return 0
+ sleep 120
+ desc 'install PMM Server'
+ set +o xtrace
-----------------------------------------------------------------------------------
install PMM Server
-----------------------------------------------------------------------------------
+ deploy_pmm_server
+ helm uninstall monitoring
Error: uninstall: Release not loaded: monitoring: release: not found
+ :
+ helm repo remove stable
"stable" has been removed from your repositories
+ helm repo add stable https://charts.helm.sh/stable
"stable" has been added to your repositories
+ [[ -n '' ]]
+ retry 10 60 helm install monitoring --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz
+ local max=10
+ local delay=60
+ shift 2
+ local n=1
+ helm install monitoring --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz
NAME: monitoring
LAST DEPLOYED: Mon Jul 8 06:04:25 2024
NAMESPACE: monitoring-2-0-19620
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster:
endpoint: https://monitoring-service.monitoring-2-0-19620.svc.cluster.local:443
login: admin
password: admin
+ sleep 20
+ kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
++ mktemp
+ local LAST_OUT=/tmp/tmp.lV4H1wfWV1
++ mktemp
+ local LAST_ERR=/tmp/tmp.xRiQuoJu23
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.lV4H1wfWV1
+ cat /tmp/tmp.xRiQuoJu23
+ rm /tmp/tmp.lV4H1wfWV1 /tmp/tmp.xRiQuoJu23
+ return 0
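Note: the PMM server install goes through a `retry` helper (`retry 10 60 helm install ...`). The trace only reveals its setup (max=10, delay=60, shift 2, n=1) before the first attempt succeeds; a plausible body consistent with those locals, with the loop and failure message being assumptions, is:

    retry() {
        local max=$1
        local delay=$2
        shift 2
        local n=1
        # Re-run the remaining arguments as a command until it succeeds,
        # sleeping $delay seconds between attempts, at most $max times.
        until "$@"; do
            if [ "$n" -ge "$max" ]; then
                echo "retry: giving up after $max attempts" >&2
                return 1
            fi
            sleep "$delay"
            n=$((n + 1))
        done
    }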
+ cluster=monitoring
+ desc 'create secrets and start client'
+ set +o xtrace
-----------------------------------------------------------------------------------
create secrets and start client
-----------------------------------------------------------------------------------
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/conf/secrets.yml
++ mktemp
+ local LAST_OUT=/tmp/tmp.VXnEUzQ4sj
++ mktemp
+ local LAST_ERR=/tmp/tmp.qy72jmj10G
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/conf/secrets.yml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.VXnEUzQ4sj
secret/some-users created
secret/some-users unchanged
+ cat /tmp/tmp.qy72jmj10G
+ rm /tmp/tmp.VXnEUzQ4sj /tmp/tmp.qy72jmj10G
+ return 0
+ yq '.spec.template.spec.volumes[0].secret.secretName="monitoring-ssl"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/conf/client_with_tls.yml
+ kubectl_bin apply -f -
++ mktemp
+ local LAST_OUT=/tmp/tmp.qMKRQTlStz
++ mktemp
+ local LAST_ERR=/tmp/tmp.pETRrckmTC
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.qMKRQTlStz
deployment.apps/psmdb-client created
+ cat /tmp/tmp.pETRrckmTC
+ rm /tmp/tmp.qMKRQTlStz /tmp/tmp.pETRrckmTC
+ return 0
+ sleep 90
+ desc 'create first PSMDB cluster monitoring'
+ set +o xtrace
-----------------------------------------------------------------------------------
create first PSMDB cluster monitoring
-----------------------------------------------------------------------------------
+ apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml
+ '[' -z '' ']'
+ cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml
+ kubectl_bin apply -f -
+ cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml
+ yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"'
++ mktemp
+ yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"'
+ yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1574-f53081a3"'
+ yq eval '.spec.upgradeOptions.apply="Never"'
+ yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"'
+ local LAST_OUT=/tmp/tmp.xp9LscviB4
++ mktemp
+ local LAST_ERR=/tmp/tmp.E59bJvHLSi
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f -
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.xp9LscviB4
perconaservermongodb.psmdb.percona.com/monitoring created
+ cat /tmp/tmp.E59bJvHLSi
+ rm /tmp/tmp.xp9LscviB4 /tmp/tmp.E59bJvHLSi
+ return 0
+ wait_for_running monitoring-rs0 3
+ local name=monitoring-rs0
+ let last_pod=2
+ local check_cluster_readyness=true
+ set_debug
+ [[ 1 == 1 ]]
+ set -o xtrace
+ local rs_name=rs0
+ local cluster_name=monitoring
++ seq 0 2
+ for i in '$(seq 0 $last_pod)'
+ [[ 0 -eq 2 ]]
+ wait_pod monitoring-rs0-0
+ local pod=monitoring-rs0-0
+ set +o xtrace
waiting for pod/monitoring-rs0-0 to be ready..................OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 1 -eq 2 ]]
+ wait_pod monitoring-rs0-1
+ local pod=monitoring-rs0-1
+ set +o xtrace
waiting for pod/monitoring-rs0-1 to be ready................OK
+ for i in '$(seq 0 $last_pod)'
+ [[ 2 -eq 2 ]]
++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.My4aA6twAs
+++ mktemp
++ local LAST_ERR=/tmp/tmp.H6xXuWOq4V
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.My4aA6twAs
++ cat /tmp/tmp.H6xXuWOq4V
++ rm /tmp/tmp.My4aA6twAs /tmp/tmp.H6xXuWOq4V
++ return 0
+ [[ '' == \t\r\u\e ]]
+ wait_pod monitoring-rs0-2
+ local pod=monitoring-rs0-2
+ set +o xtrace
waiting for pod/monitoring-rs0-2 to be ready.................OK
++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.Bf1sLykFo4
+++ mktemp
++ local LAST_ERR=/tmp/tmp.IQnchZbotf
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.Bf1sLykFo4
++ cat /tmp/tmp.IQnchZbotf
++ rm /tmp/tmp.Bf1sLykFo4 /tmp/tmp.IQnchZbotf
++ return 0
+ [[ '' == \t\r\u\e ]]
+ sleep 10
+ [[ true == \t\r\u\e ]]
+ set +x
Waiting for cluster readyness................................
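Note: apply_cluster/cat_config, seen above when the CR was created, pushes the manifest through a chain of yq filters so every image in the spec is pinned to the build matrix under test before kubectl sees it. Flattened into one pipeline (the trace interleaves the same five filters):

    cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml \
        | yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' \
        | yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' \
        | yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1574-f53081a3"' \
        | yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' \
        | yq eval '.spec.upgradeOptions.apply="Never"' \
        | kubectl apply -f -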
| select(tag == "!!str")) |= sub("monitoring-2-0-19620", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.FqXKnmuV2U ++ mktemp + local LAST_ERR=/tmp/tmp.3U43TfFdDL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.FqXKnmuV2U + cat /tmp/tmp.3U43TfFdDL + rm /tmp/tmp.FqXKnmuV2U /tmp/tmp.3U43TfFdDL + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.OHQaPvSwd0/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.OHQaPvSwd0/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.OHQaPvSwd0/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml /tmp/tmp.OHQaPvSwd0/statefulset_monitoring-rs0.yml + sleep 10 + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-19620 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-19620 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TPS5PytJYE +++ mktemp ++ local LAST_ERR=/tmp/tmp.MNtCFxpDVj ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TPS5PytJYE ++ cat /tmp/tmp.MNtCFxpDVj ++ rm /tmp/tmp.TPS5PytJYE /tmp/tmp.MNtCFxpDVj ++ return 0 + local client_container=psmdb-client-6cd48df8b6-j9lhd + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-6cd48df8b6-j9lhd -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-19620.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.ryLeEGL2uQ ++ mktemp + local LAST_ERR=/tmp/tmp.Gy8e2yTqB9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6cd48df8b6-j9lhd -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-19620.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + 
+ run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-19620 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})'
+ local uri=userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-19620
+ local driver=mongodb
+ local suffix=.svc.cluster.local
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.TPS5PytJYE
+++ mktemp
++ local LAST_ERR=/tmp/tmp.MNtCFxpDVj
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.TPS5PytJYE
++ cat /tmp/tmp.MNtCFxpDVj
++ rm /tmp/tmp.TPS5PytJYE /tmp/tmp.MNtCFxpDVj
++ return 0
+ local client_container=psmdb-client-6cd48df8b6-j9lhd
+ local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ kubectl_bin exec psmdb-client-6cd48df8b6-j9lhd -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-19620.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
++ mktemp
+ local LAST_OUT=/tmp/tmp.ryLeEGL2uQ
++ mktemp
+ local LAST_ERR=/tmp/tmp.Gy8e2yTqB9
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-6cd48df8b6-j9lhd -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-19620.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ryLeEGL2uQ
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://monitoring-mongos.monitoring-2-0-19620.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb
{"t":{"$date":"2024-07-08T06:08:48.359Z"},"s":"I",  "c":"NETWORK",  "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"}
Implicit session: session { "id" : UUID("ae7dad80-7dca-49e7-91dc-0d1302e33b3d") }
Percona Server for MongoDB server version: v7.0.11-6
WARNING: shell and server versions do not match
Successfully added user: {
    "user" : "myApp",
    "roles" : [
        {
            "db" : "myApp",
            "role" : "readWrite"
        }
    ]
}
bye
+ cat /tmp/tmp.Gy8e2yTqB9
+ rm /tmp/tmp.ryLeEGL2uQ /tmp/tmp.Gy8e2yTqB9
+ return 0
+ run_mongos 'sh.enableSharding("myApp")' clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-19620 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local 'command=sh.enableSharding("myApp")'
+ local uri=clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-19620
+ local driver=mongodb
+ local suffix=.svc.cluster.local
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.8696WwGxxr
+++ mktemp
++ local LAST_ERR=/tmp/tmp.X5tsldRVr9
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.8696WwGxxr
++ cat /tmp/tmp.X5tsldRVr9
++ rm /tmp/tmp.8696WwGxxr /tmp/tmp.X5tsldRVr9
++ return 0
+ local client_container=psmdb-client-6cd48df8b6-j9lhd
+ local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ kubectl_bin exec psmdb-client-6cd48df8b6-j9lhd -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-19620.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
++ mktemp
+ local LAST_OUT=/tmp/tmp.31ZdKS2kJq
++ mktemp
+ local LAST_ERR=/tmp/tmp.ZRKgJA57FN
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-6cd48df8b6-j9lhd -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-19620.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.31ZdKS2kJq
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://monitoring-mongos.monitoring-2-0-19620.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb
{"t":{"$date":"2024-07-08T06:08:50.363Z"},"s":"I",  "c":"NETWORK",  "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"}
Implicit session: session { "id" : UUID("de662a32-7d46-45a9-a3d4-f47e1edbf1c7") }
Percona Server for MongoDB server version: v7.0.11-6
WARNING: shell and server versions do not match
{
    "ok" : 1,
    "$clusterTime" : {
        "clusterTime" : Timestamp(1720418930, 8),
        "signature" : {
            "hash" : BinData(0,"TE2J2wAj2WhN/TydGe8AUqfkyy0="),
            "keyId" : NumberLong("7389142696171929622")
        }
    },
    "operationTime" : Timestamp(1720418930, 2)
}
bye
+ cat /tmp/tmp.ZRKgJA57FN
+ rm /tmp/tmp.31ZdKS2kJq /tmp/tmp.ZRKgJA57FN
+ return 0
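Note: every mongo interaction above goes through run_mongos: resolve the psmdb-client pod by label, then kubectl exec a printf-piped mongo shell against the mongos service, with the TLS material mounted inside the client pod. Reduced to its skeleton (the query here is an arbitrary example, not from this run):

    client=$(kubectl get pods --selector=name=psmdb-client \
        -o 'jsonpath={.items[].metadata.name}')
    kubectl exec "$client" -- bash -c \
        'printf '\''db.getUsers()\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-19620.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'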
+ insert_data_mongos 100500 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local data=100500
+ local db_name=myApp
+ local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ run_mongos 'use myApp\n db.test.insert({ x: 100500 })' myApp:myPass@monitoring-mongos.monitoring-2-0-19620 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local 'command=use myApp\n db.test.insert({ x: 100500 })'
+ local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-19620
+ local driver=mongodb
+ local suffix=.svc.cluster.local
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.5H90LCP40f
+++ mktemp
++ local LAST_ERR=/tmp/tmp.a0iurlVoFb
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.5H90LCP40f
++ cat /tmp/tmp.a0iurlVoFb
++ rm /tmp/tmp.5H90LCP40f /tmp/tmp.a0iurlVoFb
++ return 0
+ local client_container=psmdb-client-6cd48df8b6-j9lhd
+ local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ kubectl_bin exec psmdb-client-6cd48df8b6-j9lhd -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-19620.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
++ mktemp
+ local LAST_OUT=/tmp/tmp.94F6Z2ncxo
++ mktemp
+ local LAST_ERR=/tmp/tmp.MRXmkQYBhv
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-6cd48df8b6-j9lhd -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-19620.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.94F6Z2ncxo
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://monitoring-mongos.monitoring-2-0-19620.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb
{"t":{"$date":"2024-07-08T06:08:52.391Z"},"s":"I",  "c":"NETWORK",  "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"}
Implicit session: session { "id" : UUID("b9363285-31aa-4b6d-b66e-52f7d38a8a04") }
Percona Server for MongoDB server version: v7.0.11-6
WARNING: shell and server versions do not match
switched to db myApp
WriteResult({ "nInserted" : 1 })
bye
+ cat /tmp/tmp.MRXmkQYBhv
+ rm /tmp/tmp.94F6Z2ncxo /tmp/tmp.MRXmkQYBhv
+ return 0
+ insert_data_mongos 100600 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local data=100600
+ local db_name=myApp
+ local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ run_mongos 'use myApp\n db.test.insert({ x: 100600 })' myApp:myPass@monitoring-mongos.monitoring-2-0-19620 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local 'command=use myApp\n db.test.insert({ x: 100600 })'
+ local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-19620
+ local driver=mongodb
+ local suffix=.svc.cluster.local
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.NjP8TSsJ4h
+++ mktemp
++ local LAST_ERR=/tmp/tmp.qwihrtAVUd
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.NjP8TSsJ4h
++ cat /tmp/tmp.qwihrtAVUd
++ rm /tmp/tmp.NjP8TSsJ4h /tmp/tmp.qwihrtAVUd
++ return 0
+ local client_container=psmdb-client-6cd48df8b6-j9lhd
+ local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ kubectl_bin exec psmdb-client-6cd48df8b6-j9lhd -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-19620.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
++ mktemp
+ local LAST_OUT=/tmp/tmp.7ujSD67sjZ
++ mktemp
+ local LAST_ERR=/tmp/tmp.BdFP7JwslB
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-6cd48df8b6-j9lhd -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-19620.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.7ujSD67sjZ
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://monitoring-mongos.monitoring-2-0-19620.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb
{"t":{"$date":"2024-07-08T06:08:54.420Z"},"s":"I",  "c":"NETWORK",  "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"}
Implicit session: session { "id" : UUID("c568d936-34b4-4055-8486-431b2d5bfeec") }
Percona Server for MongoDB server version: v7.0.11-6
WARNING: shell and server versions do not match
switched to db myApp
WriteResult({ "nInserted" : 1 })
bye
+ cat /tmp/tmp.BdFP7JwslB
+ rm /tmp/tmp.7ujSD67sjZ /tmp/tmp.BdFP7JwslB
+ return 0
+ insert_data_mongos 100700 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local data=100700
+ local db_name=myApp
+ local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ run_mongos 'use myApp\n db.test.insert({ x: 100700 })' myApp:myPass@monitoring-mongos.monitoring-2-0-19620 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ local 'command=use myApp\n db.test.insert({ x: 100700 })'
+ local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-19620
+ local driver=mongodb
+ local suffix=.svc.cluster.local
++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.K1VAhkOwlU
+++ mktemp
++ local LAST_ERR=/tmp/tmp.alXv4rqmvs
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.K1VAhkOwlU
++ cat /tmp/tmp.alXv4rqmvs
++ rm /tmp/tmp.K1VAhkOwlU /tmp/tmp.alXv4rqmvs
++ return 0
+ local client_container=psmdb-client-6cd48df8b6-j9lhd
+ local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ kubectl_bin exec psmdb-client-6cd48df8b6-j9lhd -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-19620.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
++ mktemp
+ local LAST_OUT=/tmp/tmp.RxrQPvQBfR
++ mktemp
+ local LAST_ERR=/tmp/tmp.PU8UHGjQdG
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl exec psmdb-client-6cd48df8b6-j9lhd -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-19620.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.RxrQPvQBfR
Percona Server for MongoDB shell version v4.4.29-28
connecting to: mongodb://monitoring-mongos.monitoring-2-0-19620.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb
{"t":{"$date":"2024-07-08T06:08:56.375Z"},"s":"I",  "c":"NETWORK",  "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"}
Implicit session: session { "id" : UUID("fc3aee02-fc49-446f-b160-3fcd8390c51e") }
Percona Server for MongoDB server version: v7.0.11-6
WARNING: shell and server versions do not match
switched to db myApp
WriteResult({ "nInserted" : 1 })
bye
+ cat /tmp/tmp.PU8UHGjQdG
+ rm /tmp/tmp.RxrQPvQBfR /tmp/tmp.PU8UHGjQdG
+ return 0
+ desc 'add PMM_SERVER_API_KEY for secret some-users'
+ set +o xtrace
-----------------------------------------------------------------------------------
add PMM_SERVER_API_KEY for secret some-users
-----------------------------------------------------------------------------------
++ jq .key
+++ get_service_endpoint monitoring-service
+++ local service=monitoring-service
++++ kubectl_bin get service/monitoring-service -o json
++++ jq '.status.loadBalancer.ingress[].hostname'
++++ sed -e 's/^"//; s/"$//;'
+++++ mktemp
++++ local LAST_OUT=/tmp/tmp.V94wEYt8n5
+++++ mktemp
++++ local LAST_ERR=/tmp/tmp.Qzh9AYK74l
++++ local exit_status=0
++++ local timeout=4
+++++ seq 0 2
++++ for i in '$(seq 0 2)'
++++ set +e
++++ kubectl get service/monitoring-service -o json
++++ exit_status=0
++++ set -e
++++ '[' 0 '!=' 0 -a -n 1 ']'
++++ break
++++ cat /tmp/tmp.V94wEYt8n5
++++ cat /tmp/tmp.Qzh9AYK74l
++++ rm /tmp/tmp.V94wEYt8n5 /tmp/tmp.Qzh9AYK74l
++++ return 0
+++ local hostname=null
+++ '[' -n null -a null '!=' null ']'
++++ kubectl_bin get service/monitoring-service -o json
++++ jq '.status.loadBalancer.ingress[].ip'
++++ sed -e 's/^"//; s/"$//;'
+++++ mktemp
++++ local LAST_OUT=/tmp/tmp.NzedQ5HRc7
+++++ mktemp
++++ local LAST_ERR=/tmp/tmp.IB8WrtVTHm
++++ local exit_status=0
++++ local timeout=4
+++++ seq 0 2
++++ for i in '$(seq 0 2)'
++++ set +e
++++ kubectl get service/monitoring-service -o json
++++ exit_status=0
++++ set -e
++++ '[' 0 '!=' 0 -a -n 1 ']'
++++ break
++++ cat /tmp/tmp.NzedQ5HRc7
++++ cat /tmp/tmp.IB8WrtVTHm
++++ rm /tmp/tmp.NzedQ5HRc7 /tmp/tmp.IB8WrtVTHm
++++ return 0
+++ local ip=34.27.103.163
+++ '[' -n 34.27.103.163 -a 34.27.103.163 '!=' null ']'
+++ echo 34.27.103.163
+++ return
++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@34.27.103.163/graph/api/auth/keys
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100   155  100   119  100    36    255     77 --:--:-- --:--:-- --:--:--   333
100   155  100   119  100    36    255     77 --:--:-- --:--:-- --:--:--   333
API_KEY='"eyJrIjoid01DcU91U205dkVhRUhHWEczaHhtejhxRjVyMXMxdEQiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="' + kubectl_bin patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoid01DcU91U205dkVhRUhHWEczaHhtejhxRjVyMXMxdEQiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' ++ mktemp + local LAST_OUT=/tmp/tmp.rSEnQoP5JR ++ mktemp + local LAST_ERR=/tmp/tmp.SFEMoEyNjr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoid01DcU91U205dkVhRUhHWEczaHhtejhxRjVyMXMxdEQiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rSEnQoP5JR secret/some-users patched + cat /tmp/tmp.SFEMoEyNjr + rm /tmp/tmp.rSEnQoP5JR /tmp/tmp.SFEMoEyNjr + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KBDmxPMddD +++ mktemp ++ local LAST_ERR=/tmp/tmp.ybQgImjnCU ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KBDmxPMddD ++ cat /tmp/tmp.ybQgImjnCU ++ rm /tmp/tmp.KBDmxPMddD /tmp/tmp.ybQgImjnCU ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready.OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.e1O2l2Xoek +++ mktemp ++ local LAST_ERR=/tmp/tmp.wfaGn2v8eO ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.e1O2l2Xoek ++ cat /tmp/tmp.wfaGn2v8eO ++ rm /tmp/tmp.e1O2l2Xoek /tmp/tmp.wfaGn2v8eO ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness.................................................................................................................................. 
+ sleep 90 + desc 'check if pmm-client container enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-rs0 + local resource=statefulset/monitoring-rs0 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml + local new_result=/tmp/tmp.OHQaPvSwd0/statefulset_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-rs0 ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-19620", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.3tn5WWT1sz ++ mktemp + local LAST_ERR=/tmp/tmp.YBvwmtGG9V + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3tn5WWT1sz + cat /tmp/tmp.YBvwmtGG9V + rm /tmp/tmp.3tn5WWT1sz /tmp/tmp.YBvwmtGG9V + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.OHQaPvSwd0/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.OHQaPvSwd0/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.OHQaPvSwd0/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml /tmp/tmp.OHQaPvSwd0/statefulset_monitoring-rs0.yml + compare_kubectl service/monitoring-rs0 + local resource=service/monitoring-rs0 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml + local new_result=/tmp/tmp.OHQaPvSwd0/service_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. 
| select(tag == "!!str")) |= sub("monitoring-2-0-19620", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.5NK4KFGwzj ++ mktemp + local LAST_ERR=/tmp/tmp.6S1jtky6GB + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5NK4KFGwzj + cat /tmp/tmp.6S1jtky6GB + rm /tmp/tmp.5NK4KFGwzj /tmp/tmp.6S1jtky6GB + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.OHQaPvSwd0/service_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.OHQaPvSwd0/service_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.OHQaPvSwd0/service_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml /tmp/tmp.OHQaPvSwd0/service_monitoring-rs0.yml + compare_kubectl service/monitoring-mongos + local resource=service/monitoring-mongos + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml + local new_result=/tmp/tmp.OHQaPvSwd0/service_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-19620", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.KimB2m3Omy ++ mktemp + local LAST_ERR=/tmp/tmp.bIPIldHm4t + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.KimB2m3Omy + cat /tmp/tmp.bIPIldHm4t + rm /tmp/tmp.KimB2m3Omy /tmp/tmp.bIPIldHm4t + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.OHQaPvSwd0/service_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.OHQaPvSwd0/service_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.OHQaPvSwd0/service_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml /tmp/tmp.OHQaPvSwd0/service_monitoring-mongos.yml + compare_kubectl statefulset/monitoring-cfg + local resource=statefulset/monitoring-cfg + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml + local new_result=/tmp/tmp.OHQaPvSwd0/statefulset_monitoring-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. 
| select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-19620", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.RBNElGEsDp ++ mktemp + local LAST_ERR=/tmp/tmp.U9jUTVUl6H + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RBNElGEsDp + cat /tmp/tmp.U9jUTVUl6H + rm /tmp/tmp.RBNElGEsDp /tmp/tmp.U9jUTVUl6H + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.OHQaPvSwd0/statefulset_monitoring-cfg.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.OHQaPvSwd0/statefulset_monitoring-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.OHQaPvSwd0/statefulset_monitoring-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml /tmp/tmp.OHQaPvSwd0/statefulset_monitoring-cfg.yml + compare_kubectl statefulset/monitoring-mongos + local resource=statefulset/monitoring-mongos + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml + local new_result=/tmp/tmp.OHQaPvSwd0/statefulset_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-mongos ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. 
| select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-19620", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.kpiKRbyacT ++ mktemp + local LAST_ERR=/tmp/tmp.bNgQDBFYVp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kpiKRbyacT + cat /tmp/tmp.bNgQDBFYVp + rm /tmp/tmp.kpiKRbyacT /tmp/tmp.bNgQDBFYVp + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.OHQaPvSwd0/statefulset_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.OHQaPvSwd0/statefulset_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.OHQaPvSwd0/statefulset_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml /tmp/tmp.OHQaPvSwd0/statefulset_monitoring-mongos.yml + desc 'check mongod metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongod metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-19620-monitoring-rs0-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-19620-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1720419164 ++ /usr/bin/date -u +%s + local end=1720419224 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Ffeb53jzP3 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mJxVhx0FeS +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Ffeb53jzP3 +++ cat /tmp/tmp.mJxVhx0FeS +++ rm 
/tmp/tmp.Ffeb53jzP3 /tmp/tmp.mJxVhx0FeS +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TgARxgIlij ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ikF3xgHPj5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.TgARxgIlij +++ cat /tmp/tmp.ikF3xgHPj5 +++ rm /tmp/tmp.TgARxgIlij /tmp/tmp.ikF3xgHPj5 +++ return 0 ++ local ip=34.27.103.163 ++ '[' -n 34.27.103.163 -a 34.27.103.163 '!=' null ']' ++ echo 34.27.103.163 ++ return + local endpoint=34.27.103.163 + jq '.data.result[0].values[][1]' + grep '^"[0-9]' + curl -s -k 'https://admin:admin@34.27.103.163/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-19620-monitoring-rs0-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-19620-monitoring-rs0-1%22%7D%29&start=1720419164&end=1720419224&step=60' "1720417277" "1720417277" + get_metric_values mongodb_connections monitoring-2-0-19620-monitoring-rs0-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-19620-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1720419165 ++ /usr/bin/date -u +%s + local end=1720419225 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.R55rzpVQgh ++++ mktemp +++ local LAST_ERR=/tmp/tmp.omLfp56qtQ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.R55rzpVQgh +++ cat /tmp/tmp.omLfp56qtQ +++ rm /tmp/tmp.R55rzpVQgh /tmp/tmp.omLfp56qtQ +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2h4QduGxsJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PzQpUFxDF0 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.2h4QduGxsJ +++ cat /tmp/tmp.PzQpUFxDF0 +++ rm /tmp/tmp.2h4QduGxsJ /tmp/tmp.PzQpUFxDF0 +++ return 0 ++ local ip=34.27.103.163 ++ '[' -n 34.27.103.163 -a 34.27.103.163 '!=' null ']' ++ echo 34.27.103.163 ++ return + local endpoint=34.27.103.163 + curl -s -k 'https://admin:admin@34.27.103.163/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-19620-monitoring-rs0-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-19620-monitoring-rs0-1%22%7D%29&start=1720419165&end=1720419225&step=60' + grep '^"[0-9]' + jq '.data.result[0].values[][1]' "0" "0" + desc 'check mongo config metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongo config metrics 
----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-19620-monitoring-cfg-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-19620-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1720419166 ++ /usr/bin/date -u +%s + local end=1720419226 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ZzNCA44CdO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.LbsezbtpF4 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ZzNCA44CdO +++ cat /tmp/tmp.LbsezbtpF4 +++ rm /tmp/tmp.ZzNCA44CdO /tmp/tmp.LbsezbtpF4 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.L16uLsMgAa ++++ mktemp +++ local LAST_ERR=/tmp/tmp.dgJchN2DKH +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.L16uLsMgAa +++ cat /tmp/tmp.dgJchN2DKH +++ rm /tmp/tmp.L16uLsMgAa /tmp/tmp.dgJchN2DKH +++ return 0 ++ local ip=34.27.103.163 ++ '[' -n 34.27.103.163 -a 34.27.103.163 '!=' null ']' ++ echo 34.27.103.163 ++ return + local endpoint=34.27.103.163 + curl -s -k 'https://admin:admin@34.27.103.163/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-19620-monitoring-cfg-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-19620-monitoring-cfg-1%22%7D%29&start=1720419166&end=1720419226&step=60' + grep '^"[0-9]' + jq '.data.result[0].values[][1]' "1720417269" "1720417269" + get_metric_values mongodb_connections monitoring-2-0-19620-monitoring-cfg-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-19620-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1720419168 ++ /usr/bin/date -u +%s + local end=1720419228 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nKww0tZUSl ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nXYTjaXChv +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.nKww0tZUSl +++ cat /tmp/tmp.nXYTjaXChv +++ rm /tmp/tmp.nKww0tZUSl /tmp/tmp.nXYTjaXChv +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kppgrmNvSU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.bXGDeR9PvP +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in 
'$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.kppgrmNvSU +++ cat /tmp/tmp.bXGDeR9PvP +++ rm /tmp/tmp.kppgrmNvSU /tmp/tmp.bXGDeR9PvP +++ return 0 ++ local ip=34.27.103.163 ++ '[' -n 34.27.103.163 -a 34.27.103.163 '!=' null ']' ++ echo 34.27.103.163 ++ return + local endpoint=34.27.103.163 + curl -s -k 'https://admin:admin@34.27.103.163/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-19620-monitoring-cfg-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-19620-monitoring-cfg-1%22%7D%29&start=1720419168&end=1720419228&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "0" "0" + desc 'check mongos metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongos metrics ----------------------------------------------------------------------------------- ++ kubectl get pod -l app.kubernetes.io/component=mongos -o 'jsonpath={.items[0].metadata.name}' + MONGOS_POD_NAME=monitoring-mongos-0 + get_metric_values node_boot_time_seconds monitoring-2-0-19620-monitoring-mongos-0 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-19620-monitoring-mongos-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1720419169 ++ /usr/bin/date -u +%s + local end=1720419229 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.aDNQd1GxwG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Gf6jZr0D24 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.aDNQd1GxwG +++ cat /tmp/tmp.Gf6jZr0D24 +++ rm /tmp/tmp.aDNQd1GxwG /tmp/tmp.Gf6jZr0D24 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5MPgMHu1Bq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.AaxUyU95Vw +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.5MPgMHu1Bq +++ cat /tmp/tmp.AaxUyU95Vw +++ rm /tmp/tmp.5MPgMHu1Bq /tmp/tmp.AaxUyU95Vw +++ return 0 ++ local ip=34.27.103.163 ++ '[' -n 34.27.103.163 -a 34.27.103.163 '!=' null ']' ++ echo 34.27.103.163 ++ return + local endpoint=34.27.103.163 + curl -s -k 'https://admin:admin@34.27.103.163/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-19620-monitoring-mongos-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-19620-monitoring-mongos-0%22%7D%29&start=1720419169&end=1720419229&step=60' + jq '.data.result[0].values[][1]' + grep '^"[0-9]' "1720417277" "1720417277" + sleep 90 + desc 'check QAN data' + set +o xtrace ----------------------------------------------------------------------------------- check QAN data ----------------------------------------------------------------------------------- + 
get_qan_values mongodb dev-mongod admin:admin + local service_type=mongodb + local environment=dev-mongod + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2024-07-07T18:15:21+00:00 ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2024-07-08T06:15:21+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.MqCZj1kulK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Ci7RBodWv6 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.MqCZj1kulK +++ cat /tmp/tmp.Ci7RBodWv6 +++ rm /tmp/tmp.MqCZj1kulK /tmp/tmp.Ci7RBodWv6 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.MD5DxUGEOP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.P6cSL69Ymq +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.MD5DxUGEOP +++ cat /tmp/tmp.P6cSL69Ymq +++ rm /tmp/tmp.MD5DxUGEOP /tmp/tmp.P6cSL69Ymq +++ return 0 ++ local ip=34.27.103.163 ++ '[' -n 34.27.103.163 -a 34.27.103.163 '!=' null ']' ++ echo 34.27.103.163 ++ return + endpoint=34.27.103.163 + cat + local response ++ curl -s -k -XPOST -d @payload.json https://admin:admin@34.27.103.163/v0/qan/GetReport ++ jq '.rows[].fingerprint' + response='"TOTAL" "FIND version" "FIND system.version _id" "FIND oplog.rs"' + rm -f payload.json + [[ "TOTAL" "FIND version" "FIND system.version _id" "FIND oplog.rs" == \n\u\l\l ]] + get_qan_values mongodb dev-mongos admin:admin + local service_type=mongodb + local environment=dev-mongos + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2024-07-07T18:15:22+00:00 ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2024-07-08T06:15:22+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QcUTTvmRyI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.3VDmd1vyLW +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.QcUTTvmRyI +++ cat /tmp/tmp.3VDmd1vyLW +++ rm /tmp/tmp.QcUTTvmRyI /tmp/tmp.3VDmd1vyLW +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.NiHWIIkVpV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wQFOfZEtOg +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ 
break +++ cat /tmp/tmp.NiHWIIkVpV +++ cat /tmp/tmp.wQFOfZEtOg +++ rm /tmp/tmp.NiHWIIkVpV /tmp/tmp.wQFOfZEtOg +++ return 0 ++ local ip=34.27.103.163 ++ '[' -n 34.27.103.163 -a 34.27.103.163 '!=' null ']' ++ echo 34.27.103.163 ++ return + endpoint=34.27.103.163 + cat + local response ++ curl -s -k -XPOST -d @payload.json https://admin:admin@34.27.103.163/v0/qan/GetReport ++ jq '.rows[].fingerprint' + response='"TOTAL" "DBSTATS application,architecture,client,clusterTime,db,dbStats,driver,hash,host,id,keyId,level,lsid,mayBypassWriteBlocking,mongos,name,os,platform,provenance,readConcern,role,scale,signature,type,uid,user,version"' + rm -f payload.json + [[ "TOTAL" "DBSTATS application,architecture,client,clusterTime,db,dbStats,driver,hash,host,id,keyId,level,lsid,mayBypassWriteBlocking,mongos,name,os,platform,provenance,readConcern,role,scale,signature,type,uid,user,version" == \n\u\l\l ]] + nodeList=($(get_node_id_from_pmm)) ++ get_node_id_from_pmm ++ nodeList=() ++ local -a nodeList +++ kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name ++++ mktemp +++ local LAST_OUT=/tmp/tmp.dRrmzAd2AO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Z44OLB9KN6 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.dRrmzAd2AO +++ cat /tmp/tmp.Z44OLB9KN6 +++ rm /tmp/tmp.dRrmzAd2AO /tmp/tmp.Z44OLB9KN6 +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XcOd3MLIMa ++++ mktemp +++ local LAST_ERR=/tmp/tmp.lTRMuy6LA6 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.XcOd3MLIMa +++ cat /tmp/tmp.lTRMuy6LA6 +++ rm /tmp/tmp.XcOd3MLIMa /tmp/tmp.lTRMuy6LA6 +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-cfg-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.X1Ecth3rg4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ODnospdzAm +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-cfg-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.X1Ecth3rg4 +++ cat /tmp/tmp.ODnospdzAm +++ rm /tmp/tmp.X1Ecth3rg4 /tmp/tmp.ODnospdzAm +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l 
app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-cfg-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.59I7AIG6RG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1uniUBMtj3 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-cfg-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.59I7AIG6RG +++ cat /tmp/tmp.1uniUBMtj3 +++ rm /tmp/tmp.59I7AIG6RG /tmp/tmp.1uniUBMtj3 +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-mongos-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xhO3XYxBKO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.EocWPbfL32 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-mongos-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.xhO3XYxBKO +++ cat /tmp/tmp.EocWPbfL32 +++ rm /tmp/tmp.xhO3XYxBKO /tmp/tmp.EocWPbfL32 +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-mongos-1 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ jq -r .pmm_agent_status.node_id +++ local LAST_OUT=/tmp/tmp.asTQi4VEZk ++++ mktemp +++ local LAST_ERR=/tmp/tmp.rxUfVGsYaU +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-mongos-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.asTQi4VEZk +++ cat /tmp/tmp.rxUfVGsYaU +++ rm /tmp/tmp.asTQi4VEZk /tmp/tmp.rxUfVGsYaU +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-mongos-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QzUSreQdcG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VMW5JuztQF +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-mongos-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.QzUSreQdcG +++ cat 
/tmp/tmp.VMW5JuztQF +++ rm /tmp/tmp.QzUSreQdcG /tmp/tmp.VMW5JuztQF +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-rs0-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Qed8rldzCP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.YFGMuIgaCv +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-rs0-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Qed8rldzCP +++ cat /tmp/tmp.YFGMuIgaCv +++ rm /tmp/tmp.Qed8rldzCP /tmp/tmp.YFGMuIgaCv +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-rs0-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Sg4TuWGZfY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.iq0KHfl5js +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-rs0-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Sg4TuWGZfY +++ cat /tmp/tmp.iq0KHfl5js +++ rm /tmp/tmp.Sg4TuWGZfY /tmp/tmp.iq0KHfl5js +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-rs0-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.x5tMQnB34G ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2cGchGPWIP +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-rs0-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.x5tMQnB34G +++ cat /tmp/tmp.2cGchGPWIP +++ rm /tmp/tmp.x5tMQnB34G /tmp/tmp.2cGchGPWIP +++ return 0 ++ echo /node_id/66283ce5-3b8e-4f7f-ad3a-bd87f4a8aa44 /node_id/ce153a8b-963d-4101-a878-15e02febb2d9 /node_id/b47b338f-fcd8-4cc0-9fc9-6cb26033086a /node_id/b5468820-4c09-47a7-9459-c94f043d523d /node_id/d247a668-f67d-437c-83d2-b554c4b459f8 /node_id/156171f0-3700-44be-addd-d98a316a6fe0 /node_id/368d6a66-9a37-4193-b972-d5ce524eaa78 /node_id/012cf655-feb0-4176-8b9e-90b5d77ae348 /node_id/429e5097-8e4d-482e-ae6b-1f935b8a88ea + nodeList_from_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/66283ce5-3b8e-4f7f-ad3a-bd87f4a8aa44 /node_id/ce153a8b-963d-4101-a878-15e02febb2d9 /node_id/b47b338f-fcd8-4cc0-9fc9-6cb26033086a /node_id/b5468820-4c09-47a7-9459-c94f043d523d 
/node_id/d247a668-f67d-437c-83d2-b554c4b459f8 /node_id/156171f0-3700-44be-addd-d98a316a6fe0 /node_id/368d6a66-9a37-4193-b972-d5ce524eaa78 /node_id/012cf655-feb0-4176-8b9e-90b5d77ae348 /node_id/429e5097-8e4d-482e-ae6b-1f935b8a88ea ++ nodeList=("$@") ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/66283ce5-3b8e-4f7f-ad3a-bd87f4a8aa44 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.6lUsSyhzb2 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.ehUO8ka38d +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.6lUsSyhzb2 +++++ cat /tmp/tmp.ehUO8ka38d Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.6lUsSyhzb2 +++++ cat /tmp/tmp.ehUO8ka38d Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.6lUsSyhzb2 +++++ cat /tmp/tmp.ehUO8ka38d Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.6lUsSyhzb2 +++++ cat /tmp/tmp.ehUO8ka38d Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.6lUsSyhzb2 /tmp/tmp.ehUO8ka38d +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.n2Ba9raTwL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.sydtHRbWq8 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.n2Ba9raTwL +++ cat /tmp/tmp.sydtHRbWq8 command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ 
exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.n2Ba9raTwL +++ cat /tmp/tmp.sydtHRbWq8 command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.n2Ba9raTwL +++ cat /tmp/tmp.sydtHRbWq8 command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.n2Ba9raTwL +++ cat /tmp/tmp.sydtHRbWq8 command terminated with exit code 1 +++ rm /tmp/tmp.n2Ba9raTwL /tmp/tmp.sydtHRbWq8 +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/ce153a8b-963d-4101-a878-15e02febb2d9 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.0jJr4Bheii ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.OV4juuyymR +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.0jJr4Bheii +++++ cat /tmp/tmp.OV4juuyymR Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.0jJr4Bheii +++++ cat /tmp/tmp.OV4juuyymR Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.0jJr4Bheii +++++ cat /tmp/tmp.OV4juuyymR Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.0jJr4Bheii +++++ cat /tmp/tmp.OV4juuyymR Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.0jJr4Bheii /tmp/tmp.OV4juuyymR +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0YRKNmoWlG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xNx3tuH3kp +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set 
-e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.0YRKNmoWlG +++ cat /tmp/tmp.xNx3tuH3kp command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.0YRKNmoWlG +++ cat /tmp/tmp.xNx3tuH3kp command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.0YRKNmoWlG +++ cat /tmp/tmp.xNx3tuH3kp command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.0YRKNmoWlG +++ cat /tmp/tmp.xNx3tuH3kp command terminated with exit code 1 +++ rm /tmp/tmp.0YRKNmoWlG /tmp/tmp.xNx3tuH3kp +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/b47b338f-fcd8-4cc0-9fc9-6cb26033086a +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.RTdERlcPO9 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.NW77XkhYF7 +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.RTdERlcPO9 +++++ cat /tmp/tmp.NW77XkhYF7 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.RTdERlcPO9 +++++ cat /tmp/tmp.NW77XkhYF7 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.RTdERlcPO9 +++++ cat /tmp/tmp.NW77XkhYF7 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.RTdERlcPO9 +++++ cat /tmp/tmp.NW77XkhYF7 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.RTdERlcPO9 /tmp/tmp.NW77XkhYF7 +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local 
LAST_OUT=/tmp/tmp.z6Mz4XOAod ++++ mktemp +++ local LAST_ERR=/tmp/tmp.33z5dB0nO3 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.z6Mz4XOAod +++ cat /tmp/tmp.33z5dB0nO3 command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.z6Mz4XOAod +++ cat /tmp/tmp.33z5dB0nO3 command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.z6Mz4XOAod +++ cat /tmp/tmp.33z5dB0nO3 command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.z6Mz4XOAod +++ cat /tmp/tmp.33z5dB0nO3 command terminated with exit code 1 +++ rm /tmp/tmp.z6Mz4XOAod /tmp/tmp.33z5dB0nO3 +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/b5468820-4c09-47a7-9459-c94f043d523d ++++ get_service_ip monitoring-service +++ awk '{print $4}' ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.EbmOzqQp2b ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.8im568J4dl +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.EbmOzqQp2b +++++ cat /tmp/tmp.8im568J4dl Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.EbmOzqQp2b +++++ cat /tmp/tmp.8im568J4dl Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.EbmOzqQp2b +++++ cat /tmp/tmp.8im568J4dl Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.EbmOzqQp2b +++++ cat /tmp/tmp.8im568J4dl Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ 
rm /tmp/tmp.EbmOzqQp2b /tmp/tmp.8im568J4dl +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Q1H1YfMPIc ++++ mktemp +++ local LAST_ERR=/tmp/tmp.HvWseIkvGE +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Q1H1YfMPIc +++ cat /tmp/tmp.HvWseIkvGE command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Q1H1YfMPIc +++ cat /tmp/tmp.HvWseIkvGE command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Q1H1YfMPIc +++ cat /tmp/tmp.HvWseIkvGE command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.Q1H1YfMPIc +++ cat /tmp/tmp.HvWseIkvGE command terminated with exit code 1 +++ rm /tmp/tmp.Q1H1YfMPIc /tmp/tmp.HvWseIkvGE +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/d247a668-f67d-437c-83d2-b554c4b459f8 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.2G4oNNCtcS ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.SWJxxlmkKm +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.2G4oNNCtcS +++++ cat /tmp/tmp.SWJxxlmkKm Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.2G4oNNCtcS +++++ cat /tmp/tmp.SWJxxlmkKm Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ 
exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.2G4oNNCtcS +++++ cat /tmp/tmp.SWJxxlmkKm Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.2G4oNNCtcS +++++ cat /tmp/tmp.SWJxxlmkKm Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.2G4oNNCtcS /tmp/tmp.SWJxxlmkKm +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qXtnrW1IrY ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uyJlq5useE +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.qXtnrW1IrY +++ cat /tmp/tmp.uyJlq5useE command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.qXtnrW1IrY +++ cat /tmp/tmp.uyJlq5useE command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.qXtnrW1IrY +++ cat /tmp/tmp.uyJlq5useE command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.qXtnrW1IrY +++ cat /tmp/tmp.uyJlq5useE command terminated with exit code 1 +++ rm /tmp/tmp.qXtnrW1IrY /tmp/tmp.uyJlq5useE +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/156171f0-3700-44be-addd-d98a316a6fe0 ++++ get_service_ip monitoring-service +++ awk '{print $4}' ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.6ijDR496yw ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.WHUrsdbGx9 +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.6ijDR496yw +++++ cat /tmp/tmp.WHUrsdbGx9 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 
'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.6ijDR496yw +++++ cat /tmp/tmp.WHUrsdbGx9 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.6ijDR496yw +++++ cat /tmp/tmp.WHUrsdbGx9 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.6ijDR496yw +++++ cat /tmp/tmp.WHUrsdbGx9 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.6ijDR496yw /tmp/tmp.WHUrsdbGx9 +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Z12kIVDwuN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uRdK5bTOMH +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Z12kIVDwuN +++ cat /tmp/tmp.uRdK5bTOMH command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Z12kIVDwuN +++ cat /tmp/tmp.uRdK5bTOMH command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.Z12kIVDwuN +++ cat /tmp/tmp.uRdK5bTOMH command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.Z12kIVDwuN +++ cat /tmp/tmp.uRdK5bTOMH command terminated with exit code 1 +++ rm /tmp/tmp.Z12kIVDwuN /tmp/tmp.uRdK5bTOMH +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' +++ grep /node_id/368d6a66-9a37-4193-b972-d5ce524eaa78 ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.UX204c0v8f ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.nr0Z1rX8os +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get 
psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.UX204c0v8f +++++ cat /tmp/tmp.nr0Z1rX8os Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.UX204c0v8f +++++ cat /tmp/tmp.nr0Z1rX8os Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.UX204c0v8f +++++ cat /tmp/tmp.nr0Z1rX8os Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.UX204c0v8f +++++ cat /tmp/tmp.nr0Z1rX8os Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.UX204c0v8f /tmp/tmp.nr0Z1rX8os +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.vxbWpGgra6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2UPylR8Gyj +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.vxbWpGgra6 +++ cat /tmp/tmp.2UPylR8Gyj command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.vxbWpGgra6 +++ cat /tmp/tmp.2UPylR8Gyj command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.vxbWpGgra6 +++ cat /tmp/tmp.2UPylR8Gyj command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.vxbWpGgra6 +++ cat /tmp/tmp.2UPylR8Gyj command terminated with exit code 1 +++ rm /tmp/tmp.vxbWpGgra6 /tmp/tmp.2UPylR8Gyj +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/012cf655-feb0-4176-8b9e-90b5d77ae348 +++ awk '{print $4}' ++++ get_service_ip monitoring-service 
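The pattern repeating throughout this trace (two mktemp files, up to three attempts over seq 0 2, sleeps of 0, 4 and 8 seconds between failures, then cat and rm of the temp files) is the harness's kubectl_bin retry wrapper. A minimal sketch of that wrapper, reconstructed from the trace; the exact function body, redirections and back-off arithmetic are assumptions:

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)    # captures stdout of the wrapped kubectl call
        LAST_ERR=$(mktemp)    # captures stderr of the wrapped kubectl call
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 ]; then
                cat "$LAST_OUT"             # replay this attempt's output
                cat "$LAST_ERR" >&2
                sleep $((timeout * i))      # back-off seen in the trace: 0s, 4s, 8s
            else
                break
            fi
        done
        cat "$LAST_OUT"                     # final output, success or not
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }

Each failing call above is one full pass through this loop, so every NotFound costs roughly 12 seconds of sleeps before the wrapper returns 1.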
++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.g8mcNgMYZk ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.dqayn9S2rM +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.g8mcNgMYZk +++++ cat /tmp/tmp.dqayn9S2rM Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.g8mcNgMYZk +++++ cat /tmp/tmp.dqayn9S2rM Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.g8mcNgMYZk +++++ cat /tmp/tmp.dqayn9S2rM Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.g8mcNgMYZk +++++ cat /tmp/tmp.dqayn9S2rM Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.g8mcNgMYZk /tmp/tmp.dqayn9S2rM +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.w8ZcM1Um4G ++++ mktemp +++ local LAST_ERR=/tmp/tmp.47yBZFehkK +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.w8ZcM1Um4G +++ cat /tmp/tmp.47yBZFehkK command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.w8ZcM1Um4G +++ cat /tmp/tmp.47yBZFehkK command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.w8ZcM1Um4G +++ cat /tmp/tmp.47yBZFehkK command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.w8ZcM1Um4G +++ cat /tmp/tmp.47yBZFehkK command terminated with exit code 1 +++ rm /tmp/tmp.w8ZcM1Um4G /tmp/tmp.47yBZFehkK +++ return 1 ++ for node_id in 
'"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/429e5097-8e4d-482e-ae6b-1f935b8a88ea +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.et5XrXGqxS ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.R7smtHcsEc +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.et5XrXGqxS +++++ cat /tmp/tmp.R7smtHcsEc Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.et5XrXGqxS +++++ cat /tmp/tmp.R7smtHcsEc Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.et5XrXGqxS +++++ cat /tmp/tmp.R7smtHcsEc Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.et5XrXGqxS +++++ cat /tmp/tmp.R7smtHcsEc Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.et5XrXGqxS /tmp/tmp.R7smtHcsEc +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ckwmRzRUgm ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GH8OpmC2aB +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.ckwmRzRUgm +++ cat /tmp/tmp.GH8OpmC2aB command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.ckwmRzRUgm +++ cat /tmp/tmp.GH8OpmC2aB command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ 
--server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.ckwmRzRUgm +++ cat /tmp/tmp.GH8OpmC2aB command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.ckwmRzRUgm +++ cat /tmp/tmp.GH8OpmC2aB command terminated with exit code 1 +++ rm /tmp/tmp.ckwmRzRUgm /tmp/tmp.GH8OpmC2aB +++ return 1 ++ echo + kubectl_bin patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.5uehDXaDSF ++ mktemp + local LAST_ERR=/tmp/tmp.udz7RzjVzc + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.5uehDXaDSF perconaservermongodb.psmdb.percona.com/monitoring patched + cat /tmp/tmp.udz7RzjVzc + rm /tmp/tmp.5uehDXaDSF /tmp/tmp.udz7RzjVzc + return 0 + wait_for_delete pod/monitoring-mongos-0 + local res=pod/monitoring-mongos-0 + local wait_time=60 + set +o xtrace pod/monitoring-mongos-0 - .........................................Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found Error from server (NotFound): pods "monitoring-mongos-0" not found + wait_for_delete pod/monitoring-rs0-0 + local res=pod/monitoring-rs0-0 + local wait_time=60 + set +o xtrace pod/monitoring-rs0-0 - ...........Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found Error from server (NotFound): pods "monitoring-rs0-0" not found + wait_for_delete pod/monitoring-cfg-0 + local res=pod/monitoring-cfg-0 + local wait_time=60 + set +o xtrace pod/monitoring-cfg-0 - .........Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found Error from server (NotFound): pods "monitoring-cfg-0" not found + desc 'check if services are not deleted' + set +o xtrace ----------------------------------------------------------------------------------- check if services are not deleted ----------------------------------------------------------------------------------- + kubectl_bin get svc monitoring-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.BW99fswOr8 ++ mktemp + local LAST_ERR=/tmp/tmp.P9fuQiL03c + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get svc monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BW99fswOr8 NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-rs0 ClusterIP None 27017/TCP 15m + cat /tmp/tmp.P9fuQiL03c + rm /tmp/tmp.BW99fswOr8 /tmp/tmp.P9fuQiL03c + return 0 + kubectl_bin get svc monitoring-cfg ++ mktemp + local LAST_OUT=/tmp/tmp.XZtTJ2araH ++ mktemp + local LAST_ERR=/tmp/tmp.FieXWq5cik + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get svc monitoring-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XZtTJ2araH NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-cfg ClusterIP None 27017/TCP 15m + cat /tmp/tmp.FieXWq5cik + rm /tmp/tmp.XZtTJ2araH /tmp/tmp.FieXWq5cik + return 0 + kubectl_bin get svc monitoring-mongos 
++ mktemp + local LAST_OUT=/tmp/tmp.zbh014j1HR ++ mktemp + local LAST_ERR=/tmp/tmp.woiB2KVmK1 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get svc monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zbh014j1HR NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE monitoring-mongos ClusterIP 10.198.235.50 27017/TCP 14m + cat /tmp/tmp.woiB2KVmK1 + rm /tmp/tmp.zbh014j1HR /tmp/tmp.woiB2KVmK1 + return 0 + does_node_id_exists_in_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/66283ce5-3b8e-4f7f-ad3a-bd87f4a8aa44 /node_id/ce153a8b-963d-4101-a878-15e02febb2d9 /node_id/b47b338f-fcd8-4cc0-9fc9-6cb26033086a /node_id/b5468820-4c09-47a7-9459-c94f043d523d /node_id/d247a668-f67d-437c-83d2-b554c4b459f8 /node_id/156171f0-3700-44be-addd-d98a316a6fe0 /node_id/368d6a66-9a37-4193-b972-d5ce524eaa78 /node_id/012cf655-feb0-4176-8b9e-90b5d77ae348 /node_id/429e5097-8e4d-482e-ae6b-1f935b8a88ea ++ nodeList=("$@") ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/66283ce5-3b8e-4f7f-ad3a-bd87f4a8aa44 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.RH11C43q2m ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.qeUawKfwk3 +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.RH11C43q2m +++++ cat /tmp/tmp.qeUawKfwk3 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.RH11C43q2m +++++ cat /tmp/tmp.qeUawKfwk3 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.RH11C43q2m +++++ cat /tmp/tmp.qeUawKfwk3 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.RH11C43q2m +++++ cat /tmp/tmp.qeUawKfwk3 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.RH11C43q2m /tmp/tmp.qeUawKfwk3 +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.47BcUYHpW6 ++++ mktemp +++ local 
LAST_ERR=/tmp/tmp.FxVpfMG3kr +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.47BcUYHpW6 +++ cat /tmp/tmp.FxVpfMG3kr command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.47BcUYHpW6 +++ cat /tmp/tmp.FxVpfMG3kr command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.47BcUYHpW6 +++ cat /tmp/tmp.FxVpfMG3kr command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.47BcUYHpW6 +++ cat /tmp/tmp.FxVpfMG3kr command terminated with exit code 1 +++ rm /tmp/tmp.47BcUYHpW6 /tmp/tmp.FxVpfMG3kr +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/ce153a8b-963d-4101-a878-15e02febb2d9 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.JvgmFpCgM7 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.NIZE2X7kgU +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.JvgmFpCgM7 +++++ cat /tmp/tmp.NIZE2X7kgU Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.JvgmFpCgM7 +++++ cat /tmp/tmp.NIZE2X7kgU Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.JvgmFpCgM7 +++++ cat /tmp/tmp.NIZE2X7kgU Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.JvgmFpCgM7 +++++ cat /tmp/tmp.NIZE2X7kgU Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.JvgmFpCgM7 /tmp/tmp.NIZE2X7kgU +++++ 
return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YazSrR9BBh ++++ mktemp +++ local LAST_ERR=/tmp/tmp.FO6pziVrbz +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.YazSrR9BBh +++ cat /tmp/tmp.FO6pziVrbz command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.YazSrR9BBh +++ cat /tmp/tmp.FO6pziVrbz command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.YazSrR9BBh +++ cat /tmp/tmp.FO6pziVrbz command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.YazSrR9BBh +++ cat /tmp/tmp.FO6pziVrbz command terminated with exit code 1 +++ rm /tmp/tmp.YazSrR9BBh /tmp/tmp.FO6pziVrbz +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/b47b338f-fcd8-4cc0-9fc9-6cb26033086a +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.3lanY14etn ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.0w14Fgb3TG +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.3lanY14etn +++++ cat /tmp/tmp.0w14Fgb3TG Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.3lanY14etn +++++ cat /tmp/tmp.0w14Fgb3TG Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' 
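The test '[' '' '!=' true ']' seen at each get_service_ip call is the DNS-name fallback: because psmdb/monitoring-service no longer exists, the jsonpath lookup of .spec.replsets[].expose.enabled returns an empty string, which is not "true", so the function prints the in-cluster service name instead of resolving an external IP. A sketch of that logic as it reads from the trace; the exposed branch is never exercised here, and its field path is an assumption:

    get_service_ip() {
        local service=$1
        local server_type=${2:-rs0}
        # Unexposed (or missing) replset: fall back to the headless
        # service DNS name <service>.<service>-rs0 inside the cluster.
        if [ "$(kubectl_bin get "psmdb/${service}" \
                -o 'jsonpath={.spec.replsets[].expose.enabled}')" != "true" ]; then
            echo -n "${service}.${service}-${server_type}"
            return
        fi
        # Exposed replset: assumed to return the LoadBalancer ingress IP.
        kubectl_bin get "svc/${service}" \
            -o 'jsonpath={.status.loadBalancer.ingress[0].ip}'
    }

The fallback name monitoring-service.monitoring-service-rs0 does not resolve to a PMM server from inside the pod, which is why every pmm-admin call through it terminates with exit code 1.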
+++++ cat /tmp/tmp.3lanY14etn +++++ cat /tmp/tmp.0w14Fgb3TG Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.3lanY14etn +++++ cat /tmp/tmp.0w14Fgb3TG Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.3lanY14etn /tmp/tmp.0w14Fgb3TG +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.eLsOvQZGh1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.aAJ8SpmmcE +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.eLsOvQZGh1 +++ cat /tmp/tmp.aAJ8SpmmcE command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.eLsOvQZGh1 +++ cat /tmp/tmp.aAJ8SpmmcE command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.eLsOvQZGh1 +++ cat /tmp/tmp.aAJ8SpmmcE command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.eLsOvQZGh1 +++ cat /tmp/tmp.aAJ8SpmmcE command terminated with exit code 1 +++ rm /tmp/tmp.eLsOvQZGh1 /tmp/tmp.aAJ8SpmmcE +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/b5468820-4c09-47a7-9459-c94f043d523d ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++ awk '{print $4}' +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.27ryMsp15O ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.TQbVkIhjbh +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.27ryMsp15O +++++ cat /tmp/tmp.TQbVkIhjbh Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 
'!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.27ryMsp15O +++++ cat /tmp/tmp.TQbVkIhjbh Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.27ryMsp15O +++++ cat /tmp/tmp.TQbVkIhjbh Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.27ryMsp15O +++++ cat /tmp/tmp.TQbVkIhjbh Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.27ryMsp15O /tmp/tmp.TQbVkIhjbh +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zYm19kTHXW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mQD04Qqu9a +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.zYm19kTHXW +++ cat /tmp/tmp.mQD04Qqu9a command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.zYm19kTHXW +++ cat /tmp/tmp.mQD04Qqu9a command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.zYm19kTHXW +++ cat /tmp/tmp.mQD04Qqu9a command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.zYm19kTHXW +++ cat /tmp/tmp.mQD04Qqu9a command terminated with exit code 1 +++ rm /tmp/tmp.zYm19kTHXW /tmp/tmp.mQD04Qqu9a +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/d247a668-f67d-437c-83d2-b554c4b459f8 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.StrQZOdm4y ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.hC1A94g9p5 +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ 
set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.StrQZOdm4y +++++ cat /tmp/tmp.hC1A94g9p5 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.StrQZOdm4y +++++ cat /tmp/tmp.hC1A94g9p5 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.StrQZOdm4y +++++ cat /tmp/tmp.hC1A94g9p5 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.StrQZOdm4y +++++ cat /tmp/tmp.hC1A94g9p5 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.StrQZOdm4y /tmp/tmp.hC1A94g9p5 +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.62Z3yTgxVc ++++ mktemp +++ local LAST_ERR=/tmp/tmp.oe3bmLZuVN +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.62Z3yTgxVc +++ cat /tmp/tmp.oe3bmLZuVN command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.62Z3yTgxVc +++ cat /tmp/tmp.oe3bmLZuVN command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.62Z3yTgxVc +++ cat /tmp/tmp.oe3bmLZuVN command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.62Z3yTgxVc +++ cat /tmp/tmp.oe3bmLZuVN command terminated with exit code 1 +++ rm /tmp/tmp.62Z3yTgxVc /tmp/tmp.oe3bmLZuVN +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/156171f0-3700-44be-addd-d98a316a6fe0 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get 
psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.tBd3ysLeVD ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.Irw2dY4Nlf +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.tBd3ysLeVD +++++ cat /tmp/tmp.Irw2dY4Nlf Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.tBd3ysLeVD +++++ cat /tmp/tmp.Irw2dY4Nlf Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.tBd3ysLeVD +++++ cat /tmp/tmp.Irw2dY4Nlf Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.tBd3ysLeVD +++++ cat /tmp/tmp.Irw2dY4Nlf Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.tBd3ysLeVD /tmp/tmp.Irw2dY4Nlf +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.BmVnGIG2g1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pIVUu8G7T7 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.BmVnGIG2g1 +++ cat /tmp/tmp.pIVUu8G7T7 command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.BmVnGIG2g1 +++ cat /tmp/tmp.pIVUu8G7T7 command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.BmVnGIG2g1 +++ cat /tmp/tmp.pIVUu8G7T7 command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.BmVnGIG2g1 +++ cat /tmp/tmp.pIVUu8G7T7 command terminated with exit code 1 +++ rm /tmp/tmp.BmVnGIG2g1 /tmp/tmp.pIVUu8G7T7 +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- 
pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/368d6a66-9a37-4193-b972-d5ce524eaa78 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.5E9mbuRBx5 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.oqXxxAqxxr +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.5E9mbuRBx5 +++++ cat /tmp/tmp.oqXxxAqxxr Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.5E9mbuRBx5 +++++ cat /tmp/tmp.oqXxxAqxxr Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.5E9mbuRBx5 +++++ cat /tmp/tmp.oqXxxAqxxr Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.5E9mbuRBx5 +++++ cat /tmp/tmp.oqXxxAqxxr Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.5E9mbuRBx5 /tmp/tmp.oqXxxAqxxr +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8e3HhVKupd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.WM9f09do3M +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.8e3HhVKupd +++ cat /tmp/tmp.WM9f09do3M command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.8e3HhVKupd +++ cat /tmp/tmp.WM9f09do3M command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 
1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.8e3HhVKupd +++ cat /tmp/tmp.WM9f09do3M command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.8e3HhVKupd +++ cat /tmp/tmp.WM9f09do3M command terminated with exit code 1 +++ rm /tmp/tmp.8e3HhVKupd /tmp/tmp.WM9f09do3M +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/012cf655-feb0-4176-8b9e-90b5d77ae348 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.uS6A1SWrL4 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.geojpVx72x +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.uS6A1SWrL4 +++++ cat /tmp/tmp.geojpVx72x Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.uS6A1SWrL4 +++++ cat /tmp/tmp.geojpVx72x Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.uS6A1SWrL4 +++++ cat /tmp/tmp.geojpVx72x Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.uS6A1SWrL4 +++++ cat /tmp/tmp.geojpVx72x Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.uS6A1SWrL4 /tmp/tmp.geojpVx72x +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0dk7uAWwlx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Vvl53o4Wcf +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.0dk7uAWwlx +++ cat /tmp/tmp.Vvl53o4Wcf command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' 
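For reference, the loop generating these per-node blocks is does_node_id_exists; its body can be read almost verbatim off the expanded trace (namespace is a global set by the harness, and the trailing echo is inferred from the bare "++ echo" that closes the loop):

    does_node_id_exists() {
        local -a nodeList=("$@")
        local -a nodeList_from_pmm=()
        local node_id
        for node_id in "${nodeList[@]}"; do
            # Column 4 of "pmm-admin inventory list nodes" holds the node ID;
            # grep keeps the row for this node only if it is still registered.
            nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- \
                pmm-admin --server-url="https://admin:admin@$(get_service_ip monitoring-service)/" \
                --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE \
                | grep "$node_id" | awk '{print $4}'))
        done
        echo "${nodeList_from_pmm[@]}"
    }

With every pmm-admin call failing, nodeList_from_pmm stays empty, so the caller's [[ -n '' ]] check below sees no leftover nodes and the test proceeds.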
+++ cat /tmp/tmp.0dk7uAWwlx +++ cat /tmp/tmp.Vvl53o4Wcf command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.0dk7uAWwlx +++ cat /tmp/tmp.Vvl53o4Wcf command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.0dk7uAWwlx +++ cat /tmp/tmp.Vvl53o4Wcf command terminated with exit code 1 +++ rm /tmp/tmp.0dk7uAWwlx /tmp/tmp.Vvl53o4Wcf +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/429e5097-8e4d-482e-ae6b-1f935b8a88ea +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.C890qk0SUC ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.kq7DuzpMB4 +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.C890qk0SUC +++++ cat /tmp/tmp.kq7DuzpMB4 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.C890qk0SUC +++++ cat /tmp/tmp.kq7DuzpMB4 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.C890qk0SUC +++++ cat /tmp/tmp.kq7DuzpMB4 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.C890qk0SUC +++++ cat /tmp/tmp.kq7DuzpMB4 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.C890qk0SUC /tmp/tmp.kq7DuzpMB4 +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0ne1ysEhxE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KA3Ap9f2uP +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat 
/tmp/tmp.0ne1ysEhxE +++ cat /tmp/tmp.KA3Ap9f2uP command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.0ne1ysEhxE +++ cat /tmp/tmp.KA3Ap9f2uP command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-19620 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.0ne1ysEhxE +++ cat /tmp/tmp.KA3Ap9f2uP command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.0ne1ysEhxE +++ cat /tmp/tmp.KA3Ap9f2uP command terminated with exit code 1 +++ rm /tmp/tmp.0ne1ysEhxE /tmp/tmp.KA3Ap9f2uP +++ return 1 ++ echo + [[ -n '' ]] ++ kubectl_bin logs monitoring-rs0-0 pmm-client ++ grep -c 'cannot auto discover databases and collections' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xEu0YjgZeI +++ mktemp ++ local LAST_ERR=/tmp/tmp.Yn01IlyEKk ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl logs monitoring-rs0-0 pmm-client ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.xEu0YjgZeI ++ cat /tmp/tmp.Yn01IlyEKk error: error from server (NotFound): pods "monitoring-rs0-0" not found in namespace "monitoring-2-0-19620" ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl logs monitoring-rs0-0 pmm-client ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.xEu0YjgZeI ++ cat /tmp/tmp.Yn01IlyEKk error: error from server (NotFound): pods "monitoring-rs0-0" not found in namespace "monitoring-2-0-19620" ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl logs monitoring-rs0-0 pmm-client ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.xEu0YjgZeI ++ cat /tmp/tmp.Yn01IlyEKk error: error from server (NotFound): pods "monitoring-rs0-0" not found in namespace "monitoring-2-0-19620" ++ sleep 8 ++ cat /tmp/tmp.xEu0YjgZeI ++ cat /tmp/tmp.Yn01IlyEKk error: error from server (NotFound): pods "monitoring-rs0-0" not found in namespace "monitoring-2-0-19620" ++ rm /tmp/tmp.xEu0YjgZeI /tmp/tmp.Yn01IlyEKk ++ return 1 + [[ 0 != 0 ]] + desc 'check for passwords leak' + set +o xtrace ----------------------------------------------------------------------------------- check for passwords leak ----------------------------------------------------------------------------------- + check_passwords_leak + local secrets + local passwords + local pods ++ kubectl_bin get secrets -o json ++ jq -r '.items[].data | to_entries | .[] | select(.key | (contains("_PASSWORD"))) | .value' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KGwRSOpTpo +++ mktemp ++ local LAST_ERR=/tmp/tmp.V5Hbsq873j ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.KGwRSOpTpo ++ cat /tmp/tmp.V5Hbsq873j ++ rm /tmp/tmp.KGwRSOpTpo /tmp/tmp.V5Hbsq873j ++ return 0 + secrets='YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= 
ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2' + echo secrets=YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 secrets=YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo + passwords='backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2' + echo passwords=backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 passwords=backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 ++ kubectl_bin get pods -o name ++ awk -F / '{print $2}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.AFjABXBNEq +++ mktemp ++ local LAST_ERR=/tmp/tmp.RI8swx0Ant ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods -o name ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.AFjABXBNEq ++ cat /tmp/tmp.RI8swx0Ant ++ rm /tmp/tmp.AFjABXBNEq /tmp/tmp.RI8swx0Ant ++ return 0 + pods='monitoring-0 psmdb-client-6cd48df8b6-j9lhd' + echo pods=monitoring-0 psmdb-client-6cd48df8b6-j9lhd pods=monitoring-0 psmdb-client-6cd48df8b6-j9lhd + collect_logs monitoring-2-0-19620 + local containers + local count + NS=monitoring-2-0-19620 + for p in '$pods' ++ kubectl_bin -n monitoring-2-0-19620 get pod monitoring-0 -o 'jsonpath={.spec.containers[*].name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YacqxqwWG4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.irUBMkuPZ6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl -n monitoring-2-0-19620 get pod monitoring-0 -o 'jsonpath={.spec.containers[*].name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.YacqxqwWG4 ++ cat /tmp/tmp.irUBMkuPZ6 ++ rm /tmp/tmp.YacqxqwWG4 
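#
# What the "check for passwords leak" step above is doing: it pulls every
# secret value whose key contains _PASSWORD, base64-decodes it, and keeps
# both the plaintext and the raw base64 string, since either form could end
# up in a log line. A sketch of the extraction, using the jq filter visible
# in the trace (variable handling is assumed):
secrets=$(kubectl get secrets -o json \
	| jq -r '.items[].data | to_entries | .[] | select(.key | contains("_PASSWORD")) | .value')
decoded=$(for i in $secrets; do echo "$i" | base64 -d; echo; done)
passwords="$decoded $secrets"   # search for decoded and encoded forms alike
#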
/tmp/tmp.irUBMkuPZ6 ++ return 0 + containers=monitoring + for c in '$containers' + [[ monitoring =~ pmm ]] + kubectl_bin -n monitoring-2-0-19620 logs monitoring-0 -c monitoring ++ mktemp + local LAST_OUT=/tmp/tmp.agsf8RO6ec ++ mktemp + local LAST_ERR=/tmp/tmp.TIrw2RuwBa + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n monitoring-2-0-19620 logs monitoring-0 -c monitoring + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.agsf8RO6ec + cat /tmp/tmp.TIrw2RuwBa + rm /tmp/tmp.agsf8RO6ec /tmp/tmp.TIrw2RuwBa + return 0 + echo logs saved in: /tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt logs saved in: /tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 
/tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.OHQaPvSwd0/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + echo + for p in '$pods' ++ kubectl_bin -n monitoring-2-0-19620 get pod psmdb-client-6cd48df8b6-j9lhd -o 'jsonpath={.spec.containers[*].name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.H5rFXxezJN +++ mktemp ++ local LAST_ERR=/tmp/tmp.FdfjvenX96 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl -n monitoring-2-0-19620 get pod psmdb-client-6cd48df8b6-j9lhd -o 'jsonpath={.spec.containers[*].name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.H5rFXxezJN ++ cat /tmp/tmp.FdfjvenX96 ++ rm /tmp/tmp.H5rFXxezJN /tmp/tmp.FdfjvenX96 ++ return 0 + containers=psmdb-client + for c in '$containers' + [[ psmdb-client =~ pmm ]] + kubectl_bin -n monitoring-2-0-19620 logs psmdb-client-6cd48df8b6-j9lhd -c psmdb-client ++ mktemp + local LAST_OUT=/tmp/tmp.ietlfqLWKK ++ mktemp + local LAST_ERR=/tmp/tmp.qFMS7O4K9R + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n monitoring-2-0-19620 logs psmdb-client-6cd48df8b6-j9lhd -c psmdb-client + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ietlfqLWKK + cat /tmp/tmp.qFMS7O4K9R + rm /tmp/tmp.ietlfqLWKK /tmp/tmp.qFMS7O4K9R + return 0 + echo logs saved in: /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt logs saved in: /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- 
databaseAdmin123456 /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.OHQaPvSwd0/logs_output-psmdb-client-6cd48df8b6-j9lhd-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + echo + '[' -n psmdb-operator ']' ++ kubectl_bin -n psmdb-operator get pods -o name ++ awk -F / '{print $2}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aVinjPA4gv +++ mktemp ++ local LAST_ERR=/tmp/tmp.mHSccJsmEA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl -n psmdb-operator get pods -o name ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.aVinjPA4gv ++ cat /tmp/tmp.mHSccJsmEA ++ rm /tmp/tmp.aVinjPA4gv /tmp/tmp.mHSccJsmEA ++ return 0 + pods=percona-server-mongodb-operator-599f484889-2snv5 + collect_logs psmdb-operator + local containers + local count + NS=psmdb-operator + for p in '$pods' ++ kubectl_bin -n psmdb-operator get pod percona-server-mongodb-operator-599f484889-2snv5 -o 'jsonpath={.spec.containers[*].name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ArxajRhrFZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.XGktEc5qwa ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl -n psmdb-operator get pod percona-server-mongodb-operator-599f484889-2snv5 -o 'jsonpath={.spec.containers[*].name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ArxajRhrFZ ++ cat /tmp/tmp.XGktEc5qwa ++ rm /tmp/tmp.ArxajRhrFZ /tmp/tmp.XGktEc5qwa ++ return 0 + 
containers=percona-server-mongodb-operator + for c in '$containers' + [[ percona-server-mongodb-operator =~ pmm ]] + kubectl_bin -n psmdb-operator logs percona-server-mongodb-operator-599f484889-2snv5 -c percona-server-mongodb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.xq9xU0YcEi ++ mktemp + local LAST_ERR=/tmp/tmp.12uEy1NJ3p + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n psmdb-operator logs percona-server-mongodb-operator-599f484889-2snv5 -c percona-server-mongodb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.xq9xU0YcEi + cat /tmp/tmp.12uEy1NJ3p + rm /tmp/tmp.xq9xU0YcEi /tmp/tmp.12uEy1NJ3p + return 0 + echo logs saved in: /tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt logs saved in: /tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 
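#
# Each collected log file is then grepped for every password in both forms;
# any hit fails the test. A sketch of the counting loop seen above, using
# the $passwords list gathered earlier in this step ($logfile stands for a
# saved log, e.g. the operator log in the suite's mktemp directory):
logfile=/tmp/logs_output-example.txt   # illustrative path
for pass in $passwords; do
	# grep -c prints 0 and exits non-zero when nothing matches, so the
	# harness chases it with ":" to survive set -e
	count=$(grep -c --fixed-strings -- "$pass" "$logfile" || :)
	if [[ $count != 0 ]]; then
		echo "password $pass found in $logfile"
		exit 1   # assumed failure handling; the trace only records the counts
	fi
done
#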
/tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.OHQaPvSwd0/logs_output-percona-server-mongodb-operator-599f484889-2snv5-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + echo + helm uninstall monitoring release "monitoring" uninstalled + destroy monitoring-2-0-19620 + local namespace=monitoring-2-0-19620 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false ']' + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.rjdpEEY7W5 ++ mktemp + local LAST_ERR=/tmp/tmp.Ofi2TTRyDs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rjdpEEY7W5 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat 
/tmp/tmp.Ofi2TTRyDs + rm /tmp/tmp.rjdpEEY7W5 /tmp/tmp.Ofi2TTRyDs + return 0 ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/deploy/crd.yaml ++ grep -v '\-\-\-' + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.XCcC0MPBht ++ mktemp + local LAST_ERR=/tmp/tmp.LqtVXRmxXT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.XCcC0MPBht + cat /tmp/tmp.LqtVXRmxXT + rm /tmp/tmp.XCcC0MPBht /tmp/tmp.LqtVXRmxXT + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.yRYH3NpSbw ++ mktemp + local LAST_ERR=/tmp/tmp.fBXFd4fZMP + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.yRYH3NpSbw + cat /tmp/tmp.fBXFd4fZMP + rm /tmp/tmp.yRYH3NpSbw /tmp/tmp.fBXFd4fZMP + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' No resources found + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: resource(s) were provided, but no name was specified + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.YezNUIPlMJ ++ mktemp + local LAST_ERR=/tmp/tmp.HWLmezyVZh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YezNUIPlMJ + cat /tmp/tmp.HWLmezyVZh + rm /tmp/tmp.YezNUIPlMJ /tmp/tmp.HWLmezyVZh + return 0 + local rbac_yaml=rbac.yaml + '[' -n 
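#
# The delete_crd sequence above strips finalizers from any leftover custom
# resources before waiting for the CRDs to disappear, so deletion cannot
# hang on a finalizer. A sketch of the per-CRD cleanup, reconstructed from
# the trace ($src_dir as used throughout this job):
for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-'); do
	# clear finalizers on every remaining object of this type; when the
	# type is already gone, xargs still runs the command once with no
	# arguments, so $0 defaults to "sh" (hence the "kubectl patch ... -n sh"
	# errors above) and ":" swallows the failure
	kubectl get "$crd_name" --all-namespaces -o wide \
		| grep -v NAMESPACE \
		| xargs -L 1 sh -xc 'kubectl patch '"$crd_name"' -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' \
		|| :
	kubectl_bin wait --for=delete crd "$crd_name"
done
#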
psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.EIM1v0n4d8 ++ mktemp + local LAST_ERR=/tmp/tmp.QozHP0GZSI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1574/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EIM1v0n4d8 clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.QozHP0GZSI + rm /tmp/tmp.EIM1v0n4d8 /tmp/tmp.QozHP0GZSI + return 0 + destroy_cert_manager + kubectl_bin delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.qTXpqiFHXx ++ mktemp + local LAST_ERR=/tmp/tmp.67CX2B4vQ5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.qTXpqiFHXx namespace "cert-manager" deleted customresourcedefinition.apiextensions.k8s.io "certificaterequests.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "certificates.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "challenges.acme.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "clusterissuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "issuers.cert-manager.io" deleted customresourcedefinition.apiextensions.k8s.io "orders.acme.cert-manager.io" deleted serviceaccount "cert-manager-cainjector" deleted serviceaccount "cert-manager" deleted serviceaccount "cert-manager-webhook" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-challenges" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-cluster-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-view" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-edit" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrole.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-cainjector" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-issuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificates" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-orders" deleted clusterrolebinding.rbac.authorization.k8s.io 
"cert-manager-controller-challenges" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted + cat /tmp/tmp.67CX2B4vQ5 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.qTXpqiFHXx + cat /tmp/tmp.67CX2B4vQ5 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.qTXpqiFHXx + cat /tmp/tmp.67CX2B4vQ5 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": 
customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io 
"cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io 
"cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.qTXpqiFHXx + cat /tmp/tmp.67CX2B4vQ5 Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server 
(NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.14.5/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.qTXpqiFHXx /tmp/tmp.67CX2B4vQ5 + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + rm -rf /tmp/tmp.OHQaPvSwd0 + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator + kubectl_bin delete --grace-period=0 --force=true namespace monitoring-2-0-19620 ++ mktemp ++ mktemp + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.23LHfAjEJo + local LAST_OUT=/tmp/tmp.VpY6dQZ8l9 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.FybHTVRJ8P + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.i86yBbvThT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace monitoring-2-0-19620