Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/logs/monitoring-2-0.log WARNING: version difference between client (1.30) and server (1.27) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.27) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.27) exceeds the supported minor version skew of +/-1 + create_infra monitoring-2-0-21162 + local ns=monitoring-2-0-21162 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.NQMhjbaKtF ++ mktemp + local LAST_ERR=/tmp/tmp.W01Y47m49f + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NQMhjbaKtF customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.W01Y47m49f + rm /tmp/tmp.NQMhjbaKtF /tmp/tmp.W01Y47m49f + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/crd.yaml + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.sAFtuba1RN ++ mktemp + local LAST_ERR=/tmp/tmp.dz5LHxKMpd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sAFtuba1RN + cat /tmp/tmp.dz5LHxKMpd + rm /tmp/tmp.sAFtuba1RN /tmp/tmp.dz5LHxKMpd + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type 
"perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.MzSxkVZO4D ++ mktemp + local LAST_ERR=/tmp/tmp.epgJS2lxHV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MzSxkVZO4D + cat /tmp/tmp.epgJS2lxHV + rm /tmp/tmp.MzSxkVZO4D /tmp/tmp.epgJS2lxHV + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.LcSEgerVwi ++ mktemp + local LAST_ERR=/tmp/tmp.IVbHUt3Dn9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LcSEgerVwi + cat /tmp/tmp.IVbHUt3Dn9 + rm /tmp/tmp.LcSEgerVwi /tmp/tmp.IVbHUt3Dn9 + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.M0KLAQihJz ++ mktemp + local LAST_ERR=/tmp/tmp.ubimN7epLg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.M0KLAQihJz clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.ubimN7epLg + rm /tmp/tmp.M0KLAQihJz /tmp/tmp.ubimN7epLg + return 0 + check_crd_for_deletion PR-1612-57a92fde + local git_tag=PR-1612-57a92fde ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1612-57a92fde/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/bin/sed s/---//g ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PID2cRqcCQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.znaC5cVlps ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.PID2cRqcCQ ++ cat /tmp/tmp.znaC5cVlps Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e 
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.PID2cRqcCQ ++ cat /tmp/tmp.znaC5cVlps Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.PID2cRqcCQ ++ cat /tmp/tmp.znaC5cVlps Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.PID2cRqcCQ ++ cat /tmp/tmp.znaC5cVlps Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.PID2cRqcCQ /tmp/tmp.znaC5cVlps ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get clusterrole ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + kubectl_bin get ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + awk '{print$1}' ++ mktemp + local LAST_OUT=/tmp/tmp.W7Oc106CLo ++ mktemp + local LAST_ERR=/tmp/tmp.oP4gdpHpUp + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns ++ mktemp + local 
LAST_OUT=/tmp/tmp.aF8NmHc5Lz + xargs kubectl delete ns ++ mktemp + local LAST_ERR=/tmp/tmp.ZXr5J5Xpj5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.W7Oc106CLo + cat /tmp/tmp.oP4gdpHpUp + rm /tmp/tmp.W7Oc106CLo /tmp/tmp.oP4gdpHpUp + return 0 namespace "cert-manager" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted namespace "monitoring-2-0-7420" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aF8NmHc5Lz namespace "psmdb-operator" deleted + cat /tmp/tmp.ZXr5J5Xpj5 + rm /tmp/tmp.aF8NmHc5Lz /tmp/tmp.ZXr5J5Xpj5 + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.SGZn0s9cbi ++ mktemp + local LAST_ERR=/tmp/tmp.zBoYu7qlLW + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.SGZn0s9cbi + cat /tmp/tmp.zBoYu7qlLW + rm /tmp/tmp.SGZn0s9cbi /tmp/tmp.zBoYu7qlLW + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.LROrBsFOw0 ++ mktemp + local LAST_ERR=/tmp/tmp.8d9IXFQr8X + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LROrBsFOw0 namespace/psmdb-operator created + cat /tmp/tmp.8d9IXFQr8X + rm /tmp/tmp.LROrBsFOw0 /tmp/tmp.8d9IXFQr8X + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.sDIRQy34z0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.MfeXmViAYH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.sDIRQy34z0 ++ cat /tmp/tmp.MfeXmViAYH ++ rm /tmp/tmp.sDIRQy34z0 /tmp/tmp.MfeXmViAYH ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1612-57a92fde-7-cluster8 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.fbhufiFuRw ++ mktemp + local LAST_ERR=/tmp/tmp.KpshEidmpZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1612-57a92fde-7-cluster8 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.fbhufiFuRw Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1612-57a92fde-7-cluster8" modified. 
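# Editor's note on the CRD teardown earlier in this trace: before deleting a
# CRD, the test clears finalizers on every remaining custom resource,
# otherwise "kubectl delete crd" can hang forever on objects that only the
# (already removed) operator could reconcile. The pattern, lifted from the
# trace above; the wrapper name clear_finalizers is hypothetical:
clear_finalizers() {
    local crd=$1
    # Emit "NAMESPACE NAME" pairs, then patch each object with an empty
    # finalizers list; $0/$1 inside sh -c receive the two xargs fields.
    kubectl get "$crd" --all-namespaces -o wide \
        | grep -v NAMESPACE \
        | xargs -r -L 1 sh -xc "kubectl patch $crd -n \$0 \$1 --type=merge -p '{\"metadata\":{\"finalizers\":[]}}'" \
        || :
}
clear_finalizers perconaservermongodbbackups.psmdb.percona.com
# Note: the trace shows "kubectl patch ... -n sh" because plain xargs still
# runs its command once on empty input, so $0 falls back to "sh"; GNU
# xargs -r (--no-run-if-empty), used above, would avoid that spurious call.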
+ cat /tmp/tmp.KpshEidmpZ + rm /tmp/tmp.fbhufiFuRw /tmp/tmp.KpshEidmpZ + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.3UXv41pFUC ++ mktemp + local LAST_ERR=/tmp/tmp.uyZEMkoq7J + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.3UXv41pFUC customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.uyZEMkoq7J + rm /tmp/tmp.3UXv41pFUC /tmp/tmp.uyZEMkoq7J + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/cw-rbac.yaml + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.O5FuLf7f0j ++ mktemp + local LAST_ERR=/tmp/tmp.chZkXmh6DG + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + sed -e 's^namespace: .*^namespace: psmdb-operator^' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.O5FuLf7f0j clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.chZkXmh6DG + rm /tmp/tmp.O5FuLf7f0j /tmp/tmp.chZkXmh6DG + return 0 + kubectl_bin apply -f - + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1612-57a92fde") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.kDO3tumqRL ++ mktemp + local LAST_ERR=/tmp/tmp.trTmMK9301 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.kDO3tumqRL deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.trTmMK9301 + rm /tmp/tmp.kDO3tumqRL /tmp/tmp.trTmMK9301 + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.Y81zLPS5x5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Th0JYTRhhR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.Y81zLPS5x5 ++ cat /tmp/tmp.Th0JYTRhhR ++ rm /tmp/tmp.Y81zLPS5x5 /tmp/tmp.Th0JYTRhhR ++ return 0 + wait_pod percona-server-mongodb-operator-bdc5b774b-wb586 + local pod=percona-server-mongodb-operator-bdc5b774b-wb586 + set +o xtrace waiting for pod/percona-server-mongodb-operator-bdc5b774b-wb586 to be ready.OK + create_namespace monitoring-2-0-21162 + local namespace=monitoring-2-0-21162 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get MutatingWebhookConfiguration + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces 
monitoring-2-0-21162' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces monitoring-2-0-21162 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace monitoring-2-0-21162 --ignore-not-found ++ mktemp + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.vYQMHTJRbD + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.9SWiSgTI9N + local exit_status=0 + local timeout=4 ++ seq 0 2 + local LAST_OUT=/tmp/tmp.NzGh2pMZ8K ++ mktemp + for i in '$(seq 0 2)' + set +e + local LAST_ERR=/tmp/tmp.YWkKDJRY5S + local exit_status=0 + local timeout=4 + kubectl delete namespace monitoring-2-0-21162 --ignore-not-found ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.NzGh2pMZ8K + cat /tmp/tmp.YWkKDJRY5S + rm /tmp/tmp.NzGh2pMZ8K /tmp/tmp.YWkKDJRY5S + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vYQMHTJRbD + cat /tmp/tmp.9SWiSgTI9N + rm /tmp/tmp.vYQMHTJRbD /tmp/tmp.9SWiSgTI9N + return 0 + kubectl_bin wait --for=delete namespace monitoring-2-0-21162 ++ mktemp + local LAST_OUT=/tmp/tmp.ABLwxZdY8Y ++ mktemp + local LAST_ERR=/tmp/tmp.0r73MMg7NX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace monitoring-2-0-21162 namespace "gmp-public" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ABLwxZdY8Y + cat /tmp/tmp.0r73MMg7NX + rm /tmp/tmp.ABLwxZdY8Y /tmp/tmp.0r73MMg7NX + return 0 + desc 'create namespace monitoring-2-0-21162' + set +o xtrace ----------------------------------------------------------------------------------- create namespace monitoring-2-0-21162 ----------------------------------------------------------------------------------- + kubectl_bin create namespace monitoring-2-0-21162 ++ mktemp + local LAST_OUT=/tmp/tmp.g7QVwnnEiE ++ mktemp + local LAST_ERR=/tmp/tmp.PEWOe9qzFH + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace monitoring-2-0-21162 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.g7QVwnnEiE namespace/monitoring-2-0-21162 created + cat /tmp/tmp.PEWOe9qzFH + rm /tmp/tmp.g7QVwnnEiE /tmp/tmp.PEWOe9qzFH + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.CDp0rdD9HI +++ mktemp ++ local LAST_ERR=/tmp/tmp.PCyFSGXOxL ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.CDp0rdD9HI ++ cat /tmp/tmp.PCyFSGXOxL ++ rm /tmp/tmp.CDp0rdD9HI /tmp/tmp.PCyFSGXOxL ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1612-57a92fde-7-cluster8 --namespace=monitoring-2-0-21162 ++ mktemp + local LAST_OUT=/tmp/tmp.EmSHSS31ix ++ mktemp + local LAST_ERR=/tmp/tmp.5j27FMchtM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1612-57a92fde-7-cluster8 --namespace=monitoring-2-0-21162 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EmSHSS31ix Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1612-57a92fde-7-cluster8" modified. 
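# Editor's recap of the operator rollout a few steps back: cw-operator.yaml
# is rewritten in-flight with yq v4 before being applied, pinning the PR
# image and flipping two env vars. Reassembled here because the expression is
# split across wrapped log lines above:
yq eval '
    (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1612-57a92fde") |
    ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") |
    ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")
' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/cw-operator.yaml \
    | kubectl apply -f -
# The env-var trick works because ".. | select(.[] == NAME)" matches each map
# containing a field equal to NAME, and "|= .value=..." then rewrites that
# entry's value in place.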
+ cat /tmp/tmp.5j27FMchtM + rm /tmp/tmp.EmSHSS31ix /tmp/tmp.5j27FMchtM + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.8fmvRHvuq6 ++ mktemp + local LAST_ERR=/tmp/tmp.NSNDvfaIBR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8fmvRHvuq6 namespace/cert-manager created + cat /tmp/tmp.NSNDvfaIBR + rm /tmp/tmp.8fmvRHvuq6 /tmp/tmp.NSNDvfaIBR + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.QjBskMx6qU ++ mktemp + local LAST_ERR=/tmp/tmp.NdVzCFCKuk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.QjBskMx6qU namespace/cert-manager labeled + cat /tmp/tmp.NdVzCFCKuk + rm /tmp/tmp.QjBskMx6qU /tmp/tmp.NdVzCFCKuk + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.VcpGnOgway ++ mktemp + local LAST_ERR=/tmp/tmp.vJpEm3Jsd9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VcpGnOgway namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged 
clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews configured role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection configured rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.vJpEm3Jsd9 Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
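# Editor's recap: cert-manager is installed from the upstream v1.15.1 static
# manifest with client-side validation off (the file ships its own CRDs),
# after pre-creating and labeling the namespace; the test then blocks until
# every cert-manager pod reports Ready. Condensed from the trace:
kubectl create namespace cert-manager
kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml --validate=false
kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready
# The "missing last-applied-configuration annotation" warning above is
# benign: the namespace was made with "kubectl create", so the first
# "kubectl apply" over it patches the annotation in, as the message says.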
+ rm /tmp/tmp.VcpGnOgway /tmp/tmp.vJpEm3Jsd9 + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.TMyauTK5aF ++ mktemp + local LAST_ERR=/tmp/tmp.MPrE2eZXOM + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.TMyauTK5aF pod/cert-manager-bd44d64d-s8gq8 condition met pod/cert-manager-cainjector-7dcddbd8b9-g2j4f condition met pod/cert-manager-webhook-6cc8fdfd7-f6dbl condition met + cat /tmp/tmp.MPrE2eZXOM + rm /tmp/tmp.TMyauTK5aF /tmp/tmp.MPrE2eZXOM + return 0 + sleep 120 + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + deploy_pmm_server + helm uninstall monitoring Error: uninstall: Release not loaded: monitoring: release: not found + : + helm repo remove stable Error: no repo named "stable" found + : + helm repo add stable https://charts.helm.sh/stable "stable" has been added to your repositories + [[ -n '' ]] + retry 10 60 helm install monitoring --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz + local max=10 + local delay=60 + shift 2 + local n=1 + helm install monitoring --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz NAME: monitoring LAST DEPLOYED: Wed Jul 31 15:28:59 2024 NAMESPACE: monitoring-2-0-21162 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster: endpoint: https://monitoring-service.monitoring-2-0-21162.svc.cluster.local:443 login: admin password: admin + sleep 20 + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.Zxvvf4Q9iQ ++ mktemp + local LAST_ERR=/tmp/tmp.UdRNlsi4vK + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.Zxvvf4Q9iQ + cat /tmp/tmp.UdRNlsi4vK error: unable to upgrade connection: container not found ("monitoring") + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.Zxvvf4Q9iQ + cat /tmp/tmp.UdRNlsi4vK error: unable to upgrade connection: container not found ("monitoring") + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.Zxvvf4Q9iQ + cat /tmp/tmp.UdRNlsi4vK error: unable to upgrade connection: container not found ("monitoring") + sleep 8 + cat /tmp/tmp.Zxvvf4Q9iQ + cat /tmp/tmp.UdRNlsi4vK error: unable to upgrade connection: container not found ("monitoring") + rm /tmp/tmp.Zxvvf4Q9iQ /tmp/tmp.UdRNlsi4vK + return 1 + echo 'Retry 0' Retry 0 + sleep 5 + let retry+=1 + '[' 1 -ge 20 ']' + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 
2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.T7UKChMg1S ++ mktemp + local LAST_ERR=/tmp/tmp.iwYh0aygYT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.T7UKChMg1S + cat /tmp/tmp.iwYh0aygYT error: unable to upgrade connection: container not found ("monitoring") + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.T7UKChMg1S + cat /tmp/tmp.iwYh0aygYT error: unable to upgrade connection: container not found ("monitoring") + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.T7UKChMg1S + cat /tmp/tmp.iwYh0aygYT error: unable to upgrade connection: container not found ("monitoring") + sleep 8 + cat /tmp/tmp.T7UKChMg1S + cat /tmp/tmp.iwYh0aygYT error: unable to upgrade connection: container not found ("monitoring") + rm /tmp/tmp.T7UKChMg1S /tmp/tmp.iwYh0aygYT + return 1 + echo 'Retry 1' Retry 1 + sleep 5 + let retry+=1 + '[' 2 -ge 20 ']' + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.rp9d0MKB6m ++ mktemp + local LAST_ERR=/tmp/tmp.xDwjJaePlF + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rp9d0MKB6m + cat /tmp/tmp.xDwjJaePlF + rm /tmp/tmp.rp9d0MKB6m /tmp/tmp.xDwjJaePlF + return 0 + cluster=monitoring + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.2PHZQlBvNE ++ mktemp + local LAST_ERR=/tmp/tmp.vhKXDU3DNR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2PHZQlBvNE secret/some-users created secret/some-users unchanged + cat /tmp/tmp.vhKXDU3DNR + rm /tmp/tmp.2PHZQlBvNE /tmp/tmp.vhKXDU3DNR + return 0 + yq '.spec.template.spec.volumes[0].secret.secretName="monitoring-ssl"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/conf/client_with_tls.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.VvH2B2ZPwM ++ mktemp + local LAST_ERR=/tmp/tmp.gsS4UigvHX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.VvH2B2ZPwM deployment.apps/psmdb-client created + cat /tmp/tmp.gsS4UigvHX + rm /tmp/tmp.VvH2B2ZPwM 
/tmp/tmp.gsS4UigvHX + return 0 + sleep 90 + desc 'create first PSMDB cluster monitoring' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster monitoring ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1612-57a92fde"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '.spec.upgradeOptions.apply="Never"' ++ mktemp + local LAST_OUT=/tmp/tmp.6xIvN8sqeo ++ mktemp + local LAST_ERR=/tmp/tmp.zUZ3XIDuGk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.6xIvN8sqeo perconaservermongodb.psmdb.percona.com/monitoring created + cat /tmp/tmp.zUZ3XIDuGk + rm /tmp/tmp.6xIvN8sqeo /tmp/tmp.zUZ3XIDuGk + return 0 + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready...........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready...............OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6EF4LLl3xl +++ mktemp ++ local LAST_ERR=/tmp/tmp.GrpaPoF396 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.6EF4LLl3xl ++ cat /tmp/tmp.GrpaPoF396 ++ rm /tmp/tmp.6EF4LLl3xl /tmp/tmp.GrpaPoF396 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready...........OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.n69ixN5wl5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.wWD7BJSsH6 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.n69ixN5wl5 ++ cat /tmp/tmp.wWD7BJSsH6 ++ rm /tmp/tmp.n69ixN5wl5 /tmp/tmp.wWD7BJSsH6 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ 
true == \t\r\u\e ]] + set +x Waiting for cluster readyness.................. + desc 'check if pmm-client container is not enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container is not enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-rs0 -no-pmm + local resource=statefulset/monitoring-rs0 + local postfix=-no-pmm + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml + local new_result=/tmp/tmp.JgvImX9by8/statefulset_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.tnQsXnQTGj + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-21162", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_ERR=/tmp/tmp.wSlHrp2yjL + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.tnQsXnQTGj + cat /tmp/tmp.wSlHrp2yjL + rm /tmp/tmp.tnQsXnQTGj /tmp/tmp.wSlHrp2yjL + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.JgvImX9by8/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ bc -l ++ echo '1.27 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.JgvImX9by8/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.JgvImX9by8/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml /tmp/tmp.JgvImX9by8/statefulset_monitoring-rs0.yml + sleep 10 + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-21162 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-21162 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.J4aFAesU3s +++ mktemp ++ local LAST_ERR=/tmp/tmp.F4NxzB1TUH ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.J4aFAesU3s ++ cat /tmp/tmp.F4NxzB1TUH ++ rm /tmp/tmp.J4aFAesU3s /tmp/tmp.F4NxzB1TUH ++ return 0 + local client_container=psmdb-client-6f7f47c4b-pcdjc + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-6f7f47c4b-pcdjc -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-21162.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.jcoYmgeoum ++ mktemp + local LAST_ERR=/tmp/tmp.IsP7HEHUXb + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6f7f47c4b-pcdjc -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-21162.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.jcoYmgeoum Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-21162.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-07-31T15:34:18.215Z"},"s":"I", "c":"NETWORK", "id":5490002, 
"ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("2311eabd-488d-407f-95a9-84312414b506") } Percona Server for MongoDB server version: v7.0.12-7 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.IsP7HEHUXb + rm /tmp/tmp.jcoYmgeoum /tmp/tmp.IsP7HEHUXb + return 0 + run_mongos 'sh.enableSharding("myApp")' clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-21162 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=sh.enableSharding("myApp")' + local uri=clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-21162 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.w6k98wOI7Q +++ mktemp ++ local LAST_ERR=/tmp/tmp.3DY7wd1Wq5 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.w6k98wOI7Q ++ cat /tmp/tmp.3DY7wd1Wq5 ++ rm /tmp/tmp.w6k98wOI7Q /tmp/tmp.3DY7wd1Wq5 ++ return 0 + local client_container=psmdb-client-6f7f47c4b-pcdjc + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-6f7f47c4b-pcdjc -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-21162.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.OYY4ngEP1r ++ mktemp + local LAST_ERR=/tmp/tmp.6Y9XzA6tRI + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6f7f47c4b-pcdjc -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-21162.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.OYY4ngEP1r Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-21162.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-07-31T15:34:21.402Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("ed7c6bc0-977b-418e-b795-edc48586675f") } Percona Server for MongoDB server version: v7.0.12-7 WARNING: shell and server versions do not match { "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1722440061, 9), "signature" : { "hash" : BinData(0,"dZHKjTXi36ZsYPL00AjxbCLbQpk="), "keyId" : NumberLong("7397823366243024919") } }, "operationTime" : Timestamp(1722440061, 3) } bye + cat /tmp/tmp.6Y9XzA6tRI + rm /tmp/tmp.OYY4ngEP1r /tmp/tmp.6Y9XzA6tRI + return 0 + insert_data_mongos 100500 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local data=100500 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' 
myApp:myPass@monitoring-mongos.monitoring-2-0-21162 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-21162 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.XG9DsHSZJU +++ mktemp ++ local LAST_ERR=/tmp/tmp.ikQ3yHi64v ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.XG9DsHSZJU ++ cat /tmp/tmp.ikQ3yHi64v ++ rm /tmp/tmp.XG9DsHSZJU /tmp/tmp.ikQ3yHi64v ++ return 0 + local client_container=psmdb-client-6f7f47c4b-pcdjc + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-6f7f47c4b-pcdjc -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-21162.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.lxTbK7qHkb ++ mktemp + local LAST_ERR=/tmp/tmp.aDTiZY1D5A + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6f7f47c4b-pcdjc -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-21162.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.lxTbK7qHkb Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-21162.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-07-31T15:34:24.763Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("e9623d95-bb46-42d8-b732-ef204b8d7fba") } Percona Server for MongoDB server version: v7.0.12-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.aDTiZY1D5A + rm /tmp/tmp.lxTbK7qHkb /tmp/tmp.aDTiZY1D5A + return 0 + insert_data_mongos 100600 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local data=100600 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + run_mongos 'use myApp\n db.test.insert({ x: 100600 })' myApp:myPass@monitoring-mongos.monitoring-2-0-21162 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=use myApp\n db.test.insert({ x: 100600 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-21162 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.g1HjCD2zOA +++ mktemp ++ local LAST_ERR=/tmp/tmp.bMpBCT4JUd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ 
'[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.g1HjCD2zOA ++ cat /tmp/tmp.bMpBCT4JUd ++ rm /tmp/tmp.g1HjCD2zOA /tmp/tmp.bMpBCT4JUd ++ return 0 + local client_container=psmdb-client-6f7f47c4b-pcdjc + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-6f7f47c4b-pcdjc -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-21162.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.eIE1pkn4p1 ++ mktemp + local LAST_ERR=/tmp/tmp.TYaDnk6jVd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6f7f47c4b-pcdjc -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-21162.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.eIE1pkn4p1 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-21162.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-07-31T15:34:28.623Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("2a107d84-324b-4be6-ad26-32e89f12db8d") } Percona Server for MongoDB server version: v7.0.12-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.TYaDnk6jVd + rm /tmp/tmp.eIE1pkn4p1 /tmp/tmp.TYaDnk6jVd + return 0 + insert_data_mongos 100700 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local data=100700 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + run_mongos 'use myApp\n db.test.insert({ x: 100700 })' myApp:myPass@monitoring-mongos.monitoring-2-0-21162 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=use myApp\n db.test.insert({ x: 100700 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-21162 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.V0xL5IXlBB +++ mktemp ++ local LAST_ERR=/tmp/tmp.o1VVywxe59 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.V0xL5IXlBB ++ cat /tmp/tmp.o1VVywxe59 ++ rm /tmp/tmp.V0xL5IXlBB /tmp/tmp.o1VVywxe59 ++ return 0 + local client_container=psmdb-client-6f7f47c4b-pcdjc + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-6f7f47c4b-pcdjc -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-21162.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.P4rJ558HW5 ++ mktemp + local LAST_ERR=/tmp/tmp.7MfvMzF385 + local exit_status=0 + local 
timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6f7f47c4b-pcdjc -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-21162.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.P4rJ558HW5 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-21162.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-07-31T15:34:32.010Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("87b28c8a-0dc3-4c45-9d93-e34f3adf0c85") } Percona Server for MongoDB server version: v7.0.12-7 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.7MfvMzF385 + rm /tmp/tmp.P4rJ558HW5 /tmp/tmp.7MfvMzF385 + return 0 + desc 'add PMM_SERVER_API_KEY for secret some-users' + set +o xtrace ----------------------------------------------------------------------------------- add PMM_SERVER_API_KEY for secret some-users ----------------------------------------------------------------------------------- ++ jq .key +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ sed -e 's/^"//; s/"$//;' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].hostname' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.iaP9XPOk3J +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.V1IaeezgAa ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.iaP9XPOk3J ++++ cat /tmp/tmp.V1IaeezgAa ++++ rm /tmp/tmp.iaP9XPOk3J /tmp/tmp.V1IaeezgAa ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.0krYilx8Uz +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.bNdG1Zp56E ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.0krYilx8Uz ++++ cat /tmp/tmp.bNdG1Zp56E ++++ rm /tmp/tmp.0krYilx8Uz /tmp/tmp.bNdG1Zp56E ++++ return 0 +++ local ip=34.46.142.119 +++ '[' -n 34.46.142.119 -a 34.46.142.119 '!=' null ']' +++ echo 34.46.142.119 +++ return ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@34.46.142.119/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 155 100 119 100 36 256 77 --:--:-- --:--:-- --:--:-- 334 + API_KEY='"eyJrIjoidklJNzNtbHFkY2lFRk1xa0dVWkswTWFBd0g0eFE3RE0iLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="' + kubectl_bin patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoidklJNzNtbHFkY2lFRk1xa0dVWkswTWFBd0g0eFE3RE0iLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' ++ mktemp + local LAST_OUT=/tmp/tmp.uJc9Rszwws 
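# Editor's recap of the API-key dance just above: resolve the external IP of
# monitoring-service, mint a Grafana Admin API key through PMM's /graph API
# using the dev chart's default admin:admin credentials, and merge it into
# the some-users secret as PMM_SERVER_API_KEY, which the operator wires into
# the pmm-client containers. Condensed; the trace strips the JSON quotes with
# sed, while jq -r below does the same job:
endpoint=$(kubectl get service/monitoring-service -o json \
    | jq -r '.status.loadBalancer.ingress[].ip')
api_key=$(curl --insecure -X POST -H 'Content-Type: application/json' \
    -d '{"name":"operator", "role": "Admin"}' \
    "https://admin:admin@${endpoint}/graph/api/auth/keys" | jq -r .key)
kubectl patch secret some-users --type merge \
    --patch "{\"stringData\": {\"PMM_SERVER_API_KEY\": \"${api_key}\"}}"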
++ mktemp + local LAST_ERR=/tmp/tmp.PSPzqn0N6e + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoidklJNzNtbHFkY2lFRk1xa0dVWkswTWFBd0g0eFE3RE0iLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.uJc9Rszwws secret/some-users patched + cat /tmp/tmp.PSPzqn0N6e + rm /tmp/tmp.uJc9Rszwws /tmp/tmp.PSPzqn0N6e + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3ZYQ9HhEAW +++ mktemp ++ local LAST_ERR=/tmp/tmp.zk8OWf8NgZ ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.3ZYQ9HhEAW ++ cat /tmp/tmp.zk8OWf8NgZ ++ rm /tmp/tmp.3ZYQ9HhEAW /tmp/tmp.zk8OWf8NgZ ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready.OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gZjeIaf5Pw +++ mktemp ++ local LAST_ERR=/tmp/tmp.3lDomgwBDw ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.gZjeIaf5Pw ++ cat /tmp/tmp.3lDomgwBDw ++ rm /tmp/tmp.gZjeIaf5Pw /tmp/tmp.3lDomgwBDw ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readyness..................................................................................... 
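# --- annotation: not part of the captured trace ---------------------------------
# Just above, the test minted a Grafana API key on the PMM server and stored it
# in the some-users secret so the operator can authenticate against PMM. A
# condensed sketch of that flow, using the LoadBalancer IP resolved in this log
# (the hostname lookup returned null here, so the .ip path applies) and the
# default admin:admin bootstrap credentials:
ENDPOINT=$(kubectl get service/monitoring-service -o json \
  | jq -r '.status.loadBalancer.ingress[0].ip')
API_KEY=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
  -d '{"name":"operator", "role": "Admin"}' \
  "https://admin:admin@${ENDPOINT}/graph/api/auth/keys" | jq -r .key)
kubectl patch secret some-users --type merge \
  --patch "{\"stringData\": {\"PMM_SERVER_API_KEY\": \"${API_KEY}\"}}"
# ---------------------------------------------------------------------------------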
+ sleep 90 + desc 'check if pmm-client container enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-rs0 + local resource=statefulset/monitoring-rs0 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml + local new_result=/tmp/tmp.JgvImX9by8/statefulset_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-rs0 ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-21162", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.DEX46kYx6S ++ mktemp + local LAST_ERR=/tmp/tmp.w3xUloZKSg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DEX46kYx6S + cat /tmp/tmp.w3xUloZKSg + rm /tmp/tmp.DEX46kYx6S /tmp/tmp.w3xUloZKSg + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.JgvImX9by8/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.JgvImX9by8/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.JgvImX9by8/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml /tmp/tmp.JgvImX9by8/statefulset_monitoring-rs0.yml + compare_kubectl service/monitoring-rs0 + local resource=service/monitoring-rs0 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml + local new_result=/tmp/tmp.JgvImX9by8/service_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0-oc.yml ']' + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. 
| select(tag == "!!str")) |= sub("monitoring-2-0-21162", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + kubectl_bin get -o yaml service/monitoring-rs0 ++ mktemp + local LAST_OUT=/tmp/tmp.EVYNvf6dqG ++ mktemp + local LAST_ERR=/tmp/tmp.iNVHYqsxK2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.EVYNvf6dqG + cat /tmp/tmp.iNVHYqsxK2 + rm /tmp/tmp.EVYNvf6dqG /tmp/tmp.iNVHYqsxK2 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.JgvImX9by8/service_monitoring-rs0.yml + version_gt 1.22 ++ bc -l ++ echo '1.27 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.JgvImX9by8/service_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.JgvImX9by8/service_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml /tmp/tmp.JgvImX9by8/service_monitoring-rs0.yml + compare_kubectl service/monitoring-mongos + local resource=service/monitoring-mongos + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml + local new_result=/tmp/tmp.JgvImX9by8/service_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-mongos ++ mktemp + local LAST_OUT=/tmp/tmp.z4yhozmlP9 ++ mktemp + local LAST_ERR=/tmp/tmp.xiNgR6Saz7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. 
| select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-21162", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.z4yhozmlP9 + cat /tmp/tmp.xiNgR6Saz7 + rm /tmp/tmp.z4yhozmlP9 /tmp/tmp.xiNgR6Saz7 + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.JgvImX9by8/service_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.JgvImX9by8/service_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.JgvImX9by8/service_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml /tmp/tmp.JgvImX9by8/service_monitoring-mongos.yml + compare_kubectl statefulset/monitoring-cfg + local resource=statefulset/monitoring-cfg + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml + local new_result=/tmp/tmp.JgvImX9by8/statefulset_monitoring-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-cfg ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. 
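# --- annotation: not part of the captured trace ---------------------------------
# Every compare_kubectl call in this phase follows one pattern: dump the live
# object as YAML, strip volatile server-populated fields with yq, mask the test
# namespace, then diff against a checked-in expectation. A reduced sketch of the
# pattern (the real filter, visible in full above, deletes many more fields):
RESOURCE=statefulset/monitoring-rs0          # any of the resources compared here
EXPECTED=e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml
kubectl get -o yaml "$RESOURCE" \
  | yq eval '
      del(.metadata.managedFields) |
      del(.metadata.resourceVersion) |
      del(.. | select(has("uid")).uid) |
      del(.status) |
      (.. | select(tag == "!!str")) |= sub("monitoring-2-0-21162", "NAME_SPACE")
    ' - > /tmp/actual.yml
diff -u "$EXPECTED" /tmp/actual.yml          # an empty diff means the object matches
# ---------------------------------------------------------------------------------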
| select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-21162", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.aiHhJyosUl ++ mktemp + local LAST_ERR=/tmp/tmp.synOTGJbnT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.aiHhJyosUl + cat /tmp/tmp.synOTGJbnT + rm /tmp/tmp.aiHhJyosUl /tmp/tmp.synOTGJbnT + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.JgvImX9by8/statefulset_monitoring-cfg.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.JgvImX9by8/statefulset_monitoring-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.JgvImX9by8/statefulset_monitoring-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml /tmp/tmp.JgvImX9by8/statefulset_monitoring-cfg.yml + compare_kubectl statefulset/monitoring-mongos + local resource=statefulset/monitoring-mongos + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml + local new_result=/tmp/tmp.JgvImX9by8/statefulset_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-mongos ++ mktemp + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. 
| select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-21162", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + local LAST_OUT=/tmp/tmp.7pXuWmrDPf ++ mktemp + local LAST_ERR=/tmp/tmp.sA80mpHRUl + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.7pXuWmrDPf + cat /tmp/tmp.sA80mpHRUl + rm /tmp/tmp.7pXuWmrDPf /tmp/tmp.sA80mpHRUl + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.JgvImX9by8/statefulset_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.JgvImX9by8/statefulset_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.JgvImX9by8/statefulset_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml /tmp/tmp.JgvImX9by8/statefulset_monitoring-mongos.yml + desc 'check mongod metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongod metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-21162-monitoring-rs0-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-21162-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1722440312 ++ /usr/bin/date -u +%s + local end=1722440372 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.m2Hn1xPV9M ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Eo7WnF8zIi +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.m2Hn1xPV9M +++ cat /tmp/tmp.Eo7WnF8zIi +++ rm 
/tmp/tmp.m2Hn1xPV9M /tmp/tmp.Eo7WnF8zIi +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.aCy79FUuYP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5y1AeLpOmN +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ sed -e 's/^"//; s/"$//;' +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.aCy79FUuYP +++ cat /tmp/tmp.5y1AeLpOmN +++ rm /tmp/tmp.aCy79FUuYP /tmp/tmp.5y1AeLpOmN +++ return 0 ++ local ip=34.46.142.119 ++ '[' -n 34.46.142.119 -a 34.46.142.119 '!=' null ']' ++ echo 34.46.142.119 ++ return + local endpoint=34.46.142.119 + grep '^"[0-9]' + curl -s -k 'https://admin:admin@34.46.142.119/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-21162-monitoring-rs0-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-21162-monitoring-rs0-1%22%7D%29&start=1722440312&end=1722440372&step=60' + jq '.data.result[0].values[][1]' "1722439469" "1722439469" + get_metric_values mongodb_connections monitoring-2-0-21162-monitoring-rs0-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-21162-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1722440314 ++ /usr/bin/date -u +%s + local end=1722440374 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.tJLog0MtLl ++++ mktemp +++ local LAST_ERR=/tmp/tmp.YfcaJgmmdw +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.tJLog0MtLl +++ cat /tmp/tmp.YfcaJgmmdw +++ rm /tmp/tmp.tJLog0MtLl /tmp/tmp.YfcaJgmmdw +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.BUaLi1dVAi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vqdkWCQuWP +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.BUaLi1dVAi +++ cat /tmp/tmp.vqdkWCQuWP +++ rm /tmp/tmp.BUaLi1dVAi /tmp/tmp.vqdkWCQuWP +++ return 0 ++ local ip=34.46.142.119 ++ '[' -n 34.46.142.119 -a 34.46.142.119 '!=' null ']' ++ echo 34.46.142.119 ++ return + local endpoint=34.46.142.119 + jq '.data.result[0].values[][1]' + grep '^"[0-9]' + curl -s -k 'https://admin:admin@34.46.142.119/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-21162-monitoring-rs0-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-21162-monitoring-rs0-1%22%7D%29&start=1722440314&end=1722440374&step=60' "0" "0" + desc 'check mongo config metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongo config metrics 
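# --- annotation: not part of the captured trace ---------------------------------
# get_metric_values verifies that PMM is actually receiving data by querying
# Prometheus through the Grafana datasource proxy. A sketch with the query
# URL-decoded (the log's encoded query repeats the same selector joined with
# "or"; one term is shown here for clarity), using the endpoint and instance
# name resolved in this run:
ENDPOINT=34.46.142.119
INSTANCE=monitoring-2-0-21162-monitoring-rs0-1
START=$(date -u +%s -d '-1 minute'); END=$(date -u +%s)
QUERY="min(node_boot_time_seconds{node_name=~\"${INSTANCE}\"})"
curl -s -k -G \
  --data-urlencode "query=${QUERY}" \
  --data-urlencode "start=${START}" \
  --data-urlencode "end=${END}" \
  --data-urlencode "step=60" \
  "https://admin:admin@${ENDPOINT}/graph/api/datasources/proxy/1/api/v1/query_range" \
  | jq '.data.result[0].values[][1]' | grep '^"[0-9]'
# Non-empty numeric output (e.g. "1722439469" above) means the metric is flowing.
# ---------------------------------------------------------------------------------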
----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-21162-monitoring-cfg-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-21162-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1722440317 ++ /usr/bin/date -u +%s + local end=1722440377 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ sed -e 's/^"//; s/"$//;' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Nz8vzWbmOV +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qyUv4RIQpY +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Nz8vzWbmOV +++ cat /tmp/tmp.qyUv4RIQpY +++ rm /tmp/tmp.Nz8vzWbmOV /tmp/tmp.qyUv4RIQpY +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xLd923iszT ++++ mktemp +++ local LAST_ERR=/tmp/tmp.sPvuqrSi3Z +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.xLd923iszT +++ cat /tmp/tmp.sPvuqrSi3Z +++ rm /tmp/tmp.xLd923iszT /tmp/tmp.sPvuqrSi3Z +++ return 0 ++ local ip=34.46.142.119 ++ '[' -n 34.46.142.119 -a 34.46.142.119 '!=' null ']' ++ echo 34.46.142.119 ++ return + local endpoint=34.46.142.119 + grep '^"[0-9]' + curl -s -k 'https://admin:admin@34.46.142.119/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-21162-monitoring-cfg-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-21162-monitoring-cfg-1%22%7D%29&start=1722440317&end=1722440377&step=60' + jq '.data.result[0].values[][1]' "1722437858" "1722437858" + get_metric_values mongodb_connections monitoring-2-0-21162-monitoring-cfg-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-21162-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1722440320 ++ /usr/bin/date -u +%s + local end=1722440380 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.cjGC5VH2k0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.V6lI16Qjq1 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.cjGC5VH2k0 +++ cat /tmp/tmp.V6lI16Qjq1 +++ rm /tmp/tmp.cjGC5VH2k0 /tmp/tmp.V6lI16Qjq1 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ jq '.status.loadBalancer.ingress[].ip' +++ local LAST_OUT=/tmp/tmp.zburBvTMJm ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5vXTKZVCj5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in 
'$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.zburBvTMJm +++ cat /tmp/tmp.5vXTKZVCj5 +++ rm /tmp/tmp.zburBvTMJm /tmp/tmp.5vXTKZVCj5 +++ return 0 ++ local ip=34.46.142.119 ++ '[' -n 34.46.142.119 -a 34.46.142.119 '!=' null ']' ++ echo 34.46.142.119 ++ return + local endpoint=34.46.142.119 + grep '^"[0-9]' + curl -s -k 'https://admin:admin@34.46.142.119/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-21162-monitoring-cfg-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-21162-monitoring-cfg-1%22%7D%29&start=1722440320&end=1722440380&step=60' + jq '.data.result[0].values[][1]' "0" "0" + desc 'check mongos metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongos metrics ----------------------------------------------------------------------------------- ++ kubectl get pod -l app.kubernetes.io/component=mongos -o 'jsonpath={.items[0].metadata.name}' + MONGOS_POD_NAME=monitoring-mongos-0 + get_metric_values node_boot_time_seconds monitoring-2-0-21162-monitoring-mongos-0 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-21162-monitoring-mongos-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1722440324 ++ /usr/bin/date -u +%s + local end=1722440384 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rnuE7zPxHs ++++ mktemp +++ local LAST_ERR=/tmp/tmp.bWD9mVgXAx +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.rnuE7zPxHs +++ cat /tmp/tmp.bWD9mVgXAx +++ rm /tmp/tmp.rnuE7zPxHs /tmp/tmp.bWD9mVgXAx +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.d2FWaW1gwy ++++ mktemp +++ local LAST_ERR=/tmp/tmp.cID2WKDBI8 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.d2FWaW1gwy +++ cat /tmp/tmp.cID2WKDBI8 +++ rm /tmp/tmp.d2FWaW1gwy /tmp/tmp.cID2WKDBI8 +++ return 0 ++ local ip=34.46.142.119 ++ '[' -n 34.46.142.119 -a 34.46.142.119 '!=' null ']' ++ echo 34.46.142.119 ++ return + local endpoint=34.46.142.119 + jq '.data.result[0].values[][1]' + grep '^"[0-9]' + curl -s -k 'https://admin:admin@34.46.142.119/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-21162-monitoring-mongos-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-21162-monitoring-mongos-0%22%7D%29&start=1722440324&end=1722440384&step=60' "1722439469" "1722439469" + sleep 90 + desc 'check QAN data' + set +o xtrace ----------------------------------------------------------------------------------- check QAN data ----------------------------------------------------------------------------------- + 
get_qan_values mongodb dev-mongod admin:admin + local service_type=mongodb + local environment=dev-mongod + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2024-07-31T03:41:17+00:00 ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2024-07-31T15:41:17+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Qsc44PqHh8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VDzr4UTuhP +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.Qsc44PqHh8 +++ cat /tmp/tmp.VDzr4UTuhP +++ rm /tmp/tmp.Qsc44PqHh8 /tmp/tmp.VDzr4UTuhP +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.HJhJ7L7s9h ++++ mktemp +++ local LAST_ERR=/tmp/tmp.dFXev7OuuA +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.HJhJ7L7s9h +++ cat /tmp/tmp.dFXev7OuuA +++ rm /tmp/tmp.HJhJ7L7s9h /tmp/tmp.dFXev7OuuA +++ return 0 ++ local ip=34.46.142.119 ++ '[' -n 34.46.142.119 -a 34.46.142.119 '!=' null ']' ++ echo 34.46.142.119 ++ return + endpoint=34.46.142.119 + cat + local response + retry=0 ++ curl -s -k -XPOST -d @payload.json https://admin:admin@34.46.142.119/v0/qan/GetReport ++ jq '.rows[].fingerprint' + [[ "TOTAL" "FIND version" "FIND system.version _id" "FIND oplog.rs" != \n\u\l\l ]] + rm -f payload.json + get_qan_values mongodb dev-mongos admin:admin + local service_type=mongodb + local environment=dev-mongos + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2024-07-31T03:41:20+00:00 ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2024-07-31T15:41:20+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RsE0sl4dkT ++++ mktemp +++ local LAST_ERR=/tmp/tmp.cUGrbeiCHh +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.RsE0sl4dkT +++ cat /tmp/tmp.cUGrbeiCHh +++ rm /tmp/tmp.RsE0sl4dkT /tmp/tmp.cUGrbeiCHh +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ktHbBElim0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.q0lIpkwClW +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.ktHbBElim0 +++ cat /tmp/tmp.q0lIpkwClW +++ rm 
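# --- annotation: not part of the captured trace ---------------------------------
# get_qan_values asserts that Query Analytics has rows for the given environment
# over the last 12 hours. The payload.json built by the bare `cat` above is
# elided in this log; only the request itself is visible:
curl -s -k -XPOST -d @payload.json \
  "https://admin:admin@34.46.142.119/v0/qan/GetReport" \
  | jq '.rows[].fingerprint'
# The check passes when the fingerprints are non-null, e.g. "TOTAL",
# "FIND version", "FIND oplog.rs" as seen above for the dev-mongod environment.
# ---------------------------------------------------------------------------------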
/tmp/tmp.ktHbBElim0 /tmp/tmp.q0lIpkwClW +++ return 0 ++ local ip=34.46.142.119 ++ '[' -n 34.46.142.119 -a 34.46.142.119 '!=' null ']' ++ echo 34.46.142.119 ++ return + endpoint=34.46.142.119 + cat + local response + retry=0 ++ curl -s -k -XPOST -d @payload.json https://admin:admin@34.46.142.119/v0/qan/GetReport ++ jq '.rows[].fingerprint' + [[ "TOTAL" "DBSTATS application,architecture,client,clusterTime,db,dbStats,driver,hash,host,id,keyId,level,lsid,mayBypassWriteBlocking,mongos,name,os,platform,provenance,readConcern,role,scale,signature,type,uid,user,version" != \n\u\l\l ]] + rm -f payload.json + nodeList=($(get_node_id_from_pmm)) ++ get_node_id_from_pmm ++ nodeList=() ++ local -a nodeList +++ kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name ++++ mktemp +++ local LAST_OUT=/tmp/tmp.X5hjkJ1Y5Q ++++ mktemp +++ local LAST_ERR=/tmp/tmp.6koyMKJutM +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns=NAME:.metadata.name +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.X5hjkJ1Y5Q +++ cat /tmp/tmp.6koyMKJutM +++ rm /tmp/tmp.X5hjkJ1Y5Q /tmp/tmp.6koyMKJutM +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zUvExWS45z ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qxciIKbdza +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-cfg-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.zUvExWS45z +++ cat /tmp/tmp.qxciIKbdza +++ rm /tmp/tmp.zUvExWS45z /tmp/tmp.qxciIKbdza +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ jq -r .pmm_agent_status.node_id +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-cfg-1 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.disOpdmeih ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NGLknmfXRJ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-cfg-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.disOpdmeih +++ cat /tmp/tmp.NGLknmfXRJ +++ rm /tmp/tmp.disOpdmeih /tmp/tmp.NGLknmfXRJ +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-cfg-2 -c pmm-client -- 
pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GPUUvOrb7b ++++ mktemp +++ local LAST_ERR=/tmp/tmp.H8dT63Q8BZ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-cfg-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.GPUUvOrb7b +++ cat /tmp/tmp.H8dT63Q8BZ +++ rm /tmp/tmp.GPUUvOrb7b /tmp/tmp.H8dT63Q8BZ +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ jq -r .pmm_agent_status.node_id +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-mongos-0 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.OpYzAyGfF6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.jUti0ro6Ww +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-mongos-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.OpYzAyGfF6 +++ cat /tmp/tmp.jUti0ro6Ww +++ rm /tmp/tmp.OpYzAyGfF6 /tmp/tmp.jUti0ro6Ww +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-mongos-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zFSa4Ckq5X ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CV6sJXAQcu +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-mongos-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.zFSa4Ckq5X +++ cat /tmp/tmp.CV6sJXAQcu +++ rm /tmp/tmp.zFSa4Ckq5X /tmp/tmp.CV6sJXAQcu +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-mongos-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.T3n13XIz87 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.WcNZzawboB +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-mongos-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.T3n13XIz87 +++ cat /tmp/tmp.WcNZzawboB +++ rm /tmp/tmp.T3n13XIz87 /tmp/tmp.WcNZzawboB +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status 
--json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-rs0-0 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TKjgMprkIQ ++++ mktemp +++ jq -r .pmm_agent_status.node_id +++ local LAST_ERR=/tmp/tmp.hl8B4Wk0mZ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-rs0-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.TKjgMprkIQ +++ cat /tmp/tmp.hl8B4Wk0mZ +++ rm /tmp/tmp.TKjgMprkIQ /tmp/tmp.hl8B4Wk0mZ +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-rs0-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3Zn2uvyH1h ++++ mktemp +++ local LAST_ERR=/tmp/tmp.DcdtScLsAo +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-rs0-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.3Zn2uvyH1h +++ cat /tmp/tmp.DcdtScLsAo +++ rm /tmp/tmp.3Zn2uvyH1h /tmp/tmp.DcdtScLsAo +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/name=percona-server-mongodb --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-rs0-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QrFNDmGP3J ++++ mktemp +++ local LAST_ERR=/tmp/tmp.aRvrbbhPLi +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-rs0-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.QrFNDmGP3J +++ cat /tmp/tmp.aRvrbbhPLi +++ rm /tmp/tmp.QrFNDmGP3J /tmp/tmp.aRvrbbhPLi +++ return 0 ++ echo /node_id/b5643f6b-16d8-4399-bd97-ef2af409f0ff /node_id/905780a4-2f2a-4bfe-9b2f-94f6640c3651 /node_id/2f9e1a1b-ac27-4fef-bf5b-28258628ed3c /node_id/2c04f4e7-91d3-4ccf-8646-237abd69d285 /node_id/2765184e-1303-469d-9112-7a423f5d499a /node_id/d3e1337e-146c-4616-bbcd-31b4b64e3574 /node_id/d3452873-5768-431c-9b1b-491687b9cbb7 /node_id/d3be8ad0-8962-4d9f-aec2-2dc01dd83a30 /node_id/32268b1c-e43e-4fab-821a-23484986c7a0 + nodeList_from_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/b5643f6b-16d8-4399-bd97-ef2af409f0ff /node_id/905780a4-2f2a-4bfe-9b2f-94f6640c3651 /node_id/2f9e1a1b-ac27-4fef-bf5b-28258628ed3c /node_id/2c04f4e7-91d3-4ccf-8646-237abd69d285 /node_id/2765184e-1303-469d-9112-7a423f5d499a /node_id/d3e1337e-146c-4616-bbcd-31b4b64e3574 /node_id/d3452873-5768-431c-9b1b-491687b9cbb7 /node_id/d3be8ad0-8962-4d9f-aec2-2dc01dd83a30 /node_id/32268b1c-e43e-4fab-821a-23484986c7a0 ++ nodeList=("$@") ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in '"${nodeList[@]}"' ++ 
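# --- annotation: not part of the captured trace ---------------------------------
# The node list echoed above is built by asking each pmm-client sidecar for the
# node_id its pmm-agent registered with the PMM server. A sketch of the
# collection loop, assuming the same pod label and namespace as this run:
NAMESPACE=monitoring-2-0-21162
nodeList=()
for pod in $(kubectl get pods -n "$NAMESPACE" --no-headers \
    -l app.kubernetes.io/name=percona-server-mongodb \
    --output=custom-columns=NAME:.metadata.name); do
  nodeList+=("$(kubectl exec -n "$NAMESPACE" "$pod" -c pmm-client -- \
    pmm-admin status --json | jq -r '.pmm_agent_status.node_id')")
done
echo "${nodeList[@]}"   # nine /node_id/<uuid> entries in this run
# ---------------------------------------------------------------------------------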
nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/b5643f6b-16d8-4399-bd97-ef2af409f0ff +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.EOyX2OiYao ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.b3DQCnXXav +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.EOyX2OiYao +++++ cat /tmp/tmp.b3DQCnXXav Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.EOyX2OiYao +++++ cat /tmp/tmp.b3DQCnXXav Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.EOyX2OiYao +++++ cat /tmp/tmp.b3DQCnXXav Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.EOyX2OiYao +++++ cat /tmp/tmp.b3DQCnXXav Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.EOyX2OiYao /tmp/tmp.b3DQCnXXav +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YUlfnZnZNp ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2w3Ur5yN5Y +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.YUlfnZnZNp +++ cat /tmp/tmp.2w3Ur5yN5Y command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.YUlfnZnZNp +++ cat /tmp/tmp.2w3Ur5yN5Y command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory 
list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.YUlfnZnZNp +++ cat /tmp/tmp.2w3Ur5yN5Y command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.YUlfnZnZNp +++ cat /tmp/tmp.2w3Ur5yN5Y command terminated with exit code 1 +++ rm /tmp/tmp.YUlfnZnZNp /tmp/tmp.2w3Ur5yN5Y +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/905780a4-2f2a-4bfe-9b2f-94f6640c3651 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.4I8hWNxAPS ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.m5NzdQfdTT +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.4I8hWNxAPS +++++ cat /tmp/tmp.m5NzdQfdTT Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.4I8hWNxAPS +++++ cat /tmp/tmp.m5NzdQfdTT Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.4I8hWNxAPS +++++ cat /tmp/tmp.m5NzdQfdTT Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.4I8hWNxAPS +++++ cat /tmp/tmp.m5NzdQfdTT Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.4I8hWNxAPS /tmp/tmp.m5NzdQfdTT +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.NIlq5rhbdp ++++ mktemp +++ local LAST_ERR=/tmp/tmp.nASHKQZAZ2 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.NIlq5rhbdp +++ cat /tmp/tmp.nASHKQZAZ2 command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes 
--node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.NIlq5rhbdp +++ cat /tmp/tmp.nASHKQZAZ2 command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.NIlq5rhbdp +++ cat /tmp/tmp.nASHKQZAZ2 command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.NIlq5rhbdp +++ cat /tmp/tmp.nASHKQZAZ2 command terminated with exit code 1 +++ rm /tmp/tmp.NIlq5rhbdp /tmp/tmp.nASHKQZAZ2 +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' +++ grep /node_id/2f9e1a1b-ac27-4fef-bf5b-28258628ed3c ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.s4xdnwM3sB ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.8n8iL0nCme +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.s4xdnwM3sB +++++ cat /tmp/tmp.8n8iL0nCme Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.s4xdnwM3sB +++++ cat /tmp/tmp.8n8iL0nCme Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.s4xdnwM3sB +++++ cat /tmp/tmp.8n8iL0nCme Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.s4xdnwM3sB +++++ cat /tmp/tmp.8n8iL0nCme Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.s4xdnwM3sB /tmp/tmp.8n8iL0nCme +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RDpLEshNme ++++ mktemp +++ local LAST_ERR=/tmp/tmp.hwc9aVtXCm +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes 
--node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.RDpLEshNme +++ cat /tmp/tmp.hwc9aVtXCm command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.RDpLEshNme +++ cat /tmp/tmp.hwc9aVtXCm command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.RDpLEshNme +++ cat /tmp/tmp.hwc9aVtXCm command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.RDpLEshNme +++ cat /tmp/tmp.hwc9aVtXCm command terminated with exit code 1 +++ rm /tmp/tmp.RDpLEshNme /tmp/tmp.hwc9aVtXCm +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/2c04f4e7-91d3-4ccf-8646-237abd69d285 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.wROgJ39p9P ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.vR8MWWjXtf +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.wROgJ39p9P +++++ cat /tmp/tmp.vR8MWWjXtf Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.wROgJ39p9P +++++ cat /tmp/tmp.vR8MWWjXtf Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.wROgJ39p9P +++++ cat /tmp/tmp.vR8MWWjXtf Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.wROgJ39p9P +++++ cat /tmp/tmp.vR8MWWjXtf Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.wROgJ39p9P /tmp/tmp.vR8MWWjXtf +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes 
--node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.m2vJv9bxfJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.hRpdrqvyd5 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.m2vJv9bxfJ +++ cat /tmp/tmp.hRpdrqvyd5 command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.m2vJv9bxfJ +++ cat /tmp/tmp.hRpdrqvyd5 command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.m2vJv9bxfJ +++ cat /tmp/tmp.hRpdrqvyd5 command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.m2vJv9bxfJ +++ cat /tmp/tmp.hRpdrqvyd5 command terminated with exit code 1 +++ rm /tmp/tmp.m2vJv9bxfJ /tmp/tmp.hRpdrqvyd5 +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/2765184e-1303-469d-9112-7a423f5d499a +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.n42pmBTavG ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.Rnl4l8x4rj +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.n42pmBTavG +++++ cat /tmp/tmp.Rnl4l8x4rj Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.n42pmBTavG +++++ cat /tmp/tmp.Rnl4l8x4rj Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.n42pmBTavG +++++ cat /tmp/tmp.Rnl4l8x4rj Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.n42pmBTavG +++++ cat /tmp/tmp.Rnl4l8x4rj Error from server (NotFound): 
perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.n42pmBTavG /tmp/tmp.Rnl4l8x4rj +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.nkIh5xQEFC ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9lFo1ZmHnl +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.nkIh5xQEFC +++ cat /tmp/tmp.9lFo1ZmHnl command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.nkIh5xQEFC +++ cat /tmp/tmp.9lFo1ZmHnl command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.nkIh5xQEFC +++ cat /tmp/tmp.9lFo1ZmHnl command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.nkIh5xQEFC +++ cat /tmp/tmp.9lFo1ZmHnl command terminated with exit code 1 +++ rm /tmp/tmp.nkIh5xQEFC /tmp/tmp.9lFo1ZmHnl +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/d3e1337e-146c-4616-bbcd-31b4b64e3574 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.WZC7oJ7SQ2 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.2zWecR3CHJ +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.WZC7oJ7SQ2 +++++ cat /tmp/tmp.2zWecR3CHJ Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.WZC7oJ7SQ2 +++++ cat /tmp/tmp.2zWecR3CHJ Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get 
psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.WZC7oJ7SQ2 +++++ cat /tmp/tmp.2zWecR3CHJ Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.WZC7oJ7SQ2 +++++ cat /tmp/tmp.2zWecR3CHJ Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.WZC7oJ7SQ2 /tmp/tmp.2zWecR3CHJ +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.JFU7QZ9GB5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Xqeo3pr06L +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.JFU7QZ9GB5 +++ cat /tmp/tmp.Xqeo3pr06L command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.JFU7QZ9GB5 +++ cat /tmp/tmp.Xqeo3pr06L command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.JFU7QZ9GB5 +++ cat /tmp/tmp.Xqeo3pr06L command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.JFU7QZ9GB5 +++ cat /tmp/tmp.Xqeo3pr06L command terminated with exit code 1 +++ rm /tmp/tmp.JFU7QZ9GB5 /tmp/tmp.Xqeo3pr06L +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/d3452873-5768-431c-9b1b-491687b9cbb7 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.nfs2ZoaX5r ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.OyTaLdDYFi +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.nfs2ZoaX5r +++++ cat /tmp/tmp.OyTaLdDYFi Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e 
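-----------------------------------------------------------------------------------
aside: the kubectl_bin retry wrapper driving this trace
-----------------------------------------------------------------------------------
Every kubectl call in this log runs through a kubectl_bin wrapper whose shape is visible in the trace: stdout and stderr are captured into mktemp files (LAST_OUT/LAST_ERR), the command is attempted up to three times with growing sleeps (0s, 4s, 8s are observed), the captured output is printed after each attempt, and the temp files are removed before the last exit status is returned. Below is a minimal sketch consistent with the trace; the variable names come from the trace itself, but the exact retry condition and the sleep formula (timeout * i) are assumptions, not copied from the suite's source.

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0
        local timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            # retry only on failure; the trace shows '[' status '!=' 0 -a -n 1 ']' here
            if [ "$exit_status" != 0 ]; then
                cat "$LAST_OUT"
                cat "$LAST_ERR"
                sleep $((timeout * i)) # observed sleeps: 0, 4, 8 (assumed formula)
            else
                break
            fi
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }

This explains the fixed rhythm of the failing blocks above: three identical attempts, three sleeps, one final cat of both capture files, then return 1.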
+++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.nfs2ZoaX5r +++++ cat /tmp/tmp.OyTaLdDYFi Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.nfs2ZoaX5r +++++ cat /tmp/tmp.OyTaLdDYFi Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.nfs2ZoaX5r +++++ cat /tmp/tmp.OyTaLdDYFi Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.nfs2ZoaX5r /tmp/tmp.OyTaLdDYFi +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xJXTpXkWfZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7isTsXi95z +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.xJXTpXkWfZ +++ cat /tmp/tmp.7isTsXi95z command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.xJXTpXkWfZ +++ cat /tmp/tmp.7isTsXi95z command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.xJXTpXkWfZ +++ cat /tmp/tmp.7isTsXi95z command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.xJXTpXkWfZ +++ cat /tmp/tmp.7isTsXi95z command terminated with exit code 1 +++ rm /tmp/tmp.xJXTpXkWfZ /tmp/tmp.7isTsXi95z +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/d3be8ad0-8962-4d9f-aec2-2dc01dd83a30 ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++ awk '{print $4}' +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.g4yfFztSis ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.hX7Mh6udFB +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 
0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.g4yfFztSis +++++ cat /tmp/tmp.hX7Mh6udFB Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.g4yfFztSis +++++ cat /tmp/tmp.hX7Mh6udFB Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.g4yfFztSis +++++ cat /tmp/tmp.hX7Mh6udFB Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.g4yfFztSis +++++ cat /tmp/tmp.hX7Mh6udFB Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.g4yfFztSis /tmp/tmp.hX7Mh6udFB +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.keXnt9mo0D ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5eE3whw2F4 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.keXnt9mo0D +++ cat /tmp/tmp.5eE3whw2F4 command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.keXnt9mo0D +++ cat /tmp/tmp.5eE3whw2F4 command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.keXnt9mo0D +++ cat /tmp/tmp.5eE3whw2F4 command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.keXnt9mo0D +++ cat /tmp/tmp.5eE3whw2F4 command terminated with exit code 1 +++ rm /tmp/tmp.keXnt9mo0D /tmp/tmp.5eE3whw2F4 +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' ++++ get_service_ip monitoring-service +++ grep 
/node_id/32268b1c-e43e-4fab-821a-23484986c7a0 ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.6AubJAapDh ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.LELBR7PjJG +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.6AubJAapDh +++++ cat /tmp/tmp.LELBR7PjJG Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.6AubJAapDh +++++ cat /tmp/tmp.LELBR7PjJG Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.6AubJAapDh +++++ cat /tmp/tmp.LELBR7PjJG Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.6AubJAapDh +++++ cat /tmp/tmp.LELBR7PjJG Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.6AubJAapDh /tmp/tmp.LELBR7PjJG +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0LVcpHuAIq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vmERvuTQjw +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.0LVcpHuAIq +++ cat /tmp/tmp.vmERvuTQjw command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.0LVcpHuAIq +++ cat /tmp/tmp.vmERvuTQjw command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.0LVcpHuAIq +++ cat /tmp/tmp.vmERvuTQjw command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.0LVcpHuAIq +++ cat /tmp/tmp.vmERvuTQjw command terminated with exit code 1 +++ rm /tmp/tmp.0LVcpHuAIq 
/tmp/tmp.vmERvuTQjw
+++ return 1
++ echo
+ kubectl_bin patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]'
++ mktemp
+ local LAST_OUT=/tmp/tmp.BTnUFfs0jy
++ mktemp
+ local LAST_ERR=/tmp/tmp.5Daa9FMteS
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl patch psmdb monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]'
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.BTnUFfs0jy
perconaservermongodb.psmdb.percona.com/monitoring patched
+ cat /tmp/tmp.5Daa9FMteS
+ rm /tmp/tmp.BTnUFfs0jy /tmp/tmp.5Daa9FMteS
+ return 0
+ wait_for_delete pod/monitoring-mongos-0
+ local res=pod/monitoring-mongos-0
+ local wait_time=60
+ set +o xtrace
waiting for pod/monitoring-mongos-0 to be deleted.............................Error from server (NotFound): pods "monitoring-mongos-0" not found
Error from server (NotFound): pods "monitoring-mongos-0" not found
Error from server (NotFound): pods "monitoring-mongos-0" not found
Error from server (NotFound): pods "monitoring-mongos-0" not found
+ wait_for_delete pod/monitoring-rs0-0
+ local res=pod/monitoring-rs0-0
+ local wait_time=60
+ set +o xtrace
waiting for pod/monitoring-rs0-0 to be deleted....Error from server (NotFound): pods "monitoring-rs0-0" not found
Error from server (NotFound): pods "monitoring-rs0-0" not found
Error from server (NotFound): pods "monitoring-rs0-0" not found
Error from server (NotFound): pods "monitoring-rs0-0" not found
+ wait_for_delete pod/monitoring-cfg-0
+ local res=pod/monitoring-cfg-0
+ local wait_time=60
+ set +o xtrace
waiting for pod/monitoring-cfg-0 to be deleted.....Error from server (NotFound): pods "monitoring-cfg-0" not found
Error from server (NotFound): pods "monitoring-cfg-0" not found
Error from server (NotFound): pods "monitoring-cfg-0" not found
Error from server (NotFound): pods "monitoring-cfg-0" not found
+ desc 'check if services are not deleted'
+ set +o xtrace
-----------------------------------------------------------------------------------
check if services are not deleted
-----------------------------------------------------------------------------------
+ kubectl_bin get svc monitoring-rs0
++ mktemp
+ local LAST_OUT=/tmp/tmp.eQCD81AMSw
++ mktemp
+ local LAST_ERR=/tmp/tmp.fPM0tFKlqf
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get svc monitoring-rs0
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.eQCD81AMSw
NAME             TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)     AGE
monitoring-rs0   ClusterIP   None         <none>        27017/TCP   17m
+ cat /tmp/tmp.fPM0tFKlqf
+ rm /tmp/tmp.eQCD81AMSw /tmp/tmp.fPM0tFKlqf
+ return 0
+ kubectl_bin get svc monitoring-cfg
++ mktemp
+ local LAST_OUT=/tmp/tmp.rEMw34yxEK
++ mktemp
+ local LAST_ERR=/tmp/tmp.6ZlYKNycHs
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get svc monitoring-cfg
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.rEMw34yxEK
NAME             TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)     AGE
monitoring-cfg   ClusterIP   None         <none>        27017/TCP   17m
+ cat /tmp/tmp.6ZlYKNycHs
+ rm /tmp/tmp.rEMw34yxEK /tmp/tmp.6ZlYKNycHs
+ return 0
+ kubectl_bin get svc monitoring-mongos
++ mktemp
+ local LAST_OUT=/tmp/tmp.jtQYuz9jyg
++ mktemp
+ local LAST_ERR=/tmp/tmp.36QZyVgqub
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl get svc monitoring-mongos
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.jtQYuz9jyg
NAME                TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)     AGE
monitoring-mongos   ClusterIP   10.232.236.197   <none>        27017/TCP   15m
+ cat /tmp/tmp.36QZyVgqub
+ rm /tmp/tmp.jtQYuz9jyg /tmp/tmp.36QZyVgqub
+ return 0
+ does_node_id_exists_in_pmm=($(does_node_id_exists "${nodeList[@]}"))
++ does_node_id_exists /node_id/b5643f6b-16d8-4399-bd97-ef2af409f0ff /node_id/905780a4-2f2a-4bfe-9b2f-94f6640c3651 /node_id/2f9e1a1b-ac27-4fef-bf5b-28258628ed3c /node_id/2c04f4e7-91d3-4ccf-8646-237abd69d285 /node_id/2765184e-1303-469d-9112-7a423f5d499a /node_id/d3e1337e-146c-4616-bbcd-31b4b64e3574 /node_id/d3452873-5768-431c-9b1b-491687b9cbb7 /node_id/d3be8ad0-8962-4d9f-aec2-2dc01dd83a30 /node_id/32268b1c-e43e-4fab-821a-23484986c7a0
++ nodeList=("$@")
++ local -a nodeList
++ nodeList_from_pmm=()
++ local -a nodeList_from_pmm
++ for node_id in '"${nodeList[@]}"'
++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}'))
+++ grep /node_id/b5643f6b-16d8-4399-bd97-ef2af409f0ff
+++ awk '{print $4}'
++++ get_service_ip monitoring-service
++++ local service=monitoring-service
++++ local server_type=rs0
+++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}'
++++++ mktemp
+++++ local LAST_OUT=/tmp/tmp.nvtxDnjA4E
++++++ mktemp
+++++ local LAST_ERR=/tmp/tmp.uBPR6SRKlY
+++++ local exit_status=0
+++++ local timeout=4
++++++ seq 0 2
+++++ for i in '$(seq 0 2)'
+++++ set +e
+++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}'
+++++ exit_status=1
+++++ set -e
+++++ '[' 1 '!=' 0 -a -n 1 ']'
+++++ cat /tmp/tmp.nvtxDnjA4E
+++++ cat /tmp/tmp.uBPR6SRKlY
Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found
+++++ sleep 0
+++++ for i in '$(seq 0 2)'
+++++ set +e
+++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}'
+++++ exit_status=1
+++++ set -e
+++++ '[' 1 '!=' 0 -a -n 1 ']'
+++++ cat /tmp/tmp.nvtxDnjA4E
+++++ cat /tmp/tmp.uBPR6SRKlY
Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found
+++++ sleep 4
+++++ for i in '$(seq 0 2)'
+++++ set +e
+++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}'
+++++ exit_status=1
+++++ set -e
+++++ '[' 1 '!=' 0 -a -n 1 ']'
+++++ cat /tmp/tmp.nvtxDnjA4E
+++++ cat /tmp/tmp.uBPR6SRKlY
Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found
+++++ sleep 8
+++++ cat /tmp/tmp.nvtxDnjA4E
+++++ cat /tmp/tmp.uBPR6SRKlY
Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found
+++++ rm /tmp/tmp.nvtxDnjA4E /tmp/tmp.uBPR6SRKlY
+++++ return 1
++++ '[' '' '!=' true ']'
++++ echo -n monitoring-service.monitoring-service-rs0
++++ return
+++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE
++++ mktemp
+++ local LAST_OUT=/tmp/tmp.AGGcLX7j8W
++++ mktemp
+++ local LAST_ERR=/tmp/tmp.FVXOWcCmxm
+++ local exit_status=0
+++ local timeout=4
++++ seq 0 2
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list
nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.AGGcLX7j8W +++ cat /tmp/tmp.FVXOWcCmxm command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.AGGcLX7j8W +++ cat /tmp/tmp.FVXOWcCmxm command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.AGGcLX7j8W +++ cat /tmp/tmp.FVXOWcCmxm command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.AGGcLX7j8W +++ cat /tmp/tmp.FVXOWcCmxm command terminated with exit code 1 +++ rm /tmp/tmp.AGGcLX7j8W /tmp/tmp.FVXOWcCmxm +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' +++ grep /node_id/905780a4-2f2a-4bfe-9b2f-94f6640c3651 ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.F2Fjjpfftd ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.j7cTrOG9iN +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.F2Fjjpfftd +++++ cat /tmp/tmp.j7cTrOG9iN Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.F2Fjjpfftd +++++ cat /tmp/tmp.j7cTrOG9iN Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.F2Fjjpfftd +++++ cat /tmp/tmp.j7cTrOG9iN Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.F2Fjjpfftd +++++ cat /tmp/tmp.j7cTrOG9iN Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.F2Fjjpfftd /tmp/tmp.j7cTrOG9iN +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes 
--node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.dvS7TIvDQU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.L66UpAoQdm +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.dvS7TIvDQU +++ cat /tmp/tmp.L66UpAoQdm command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.dvS7TIvDQU +++ cat /tmp/tmp.L66UpAoQdm command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.dvS7TIvDQU +++ cat /tmp/tmp.L66UpAoQdm command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.dvS7TIvDQU +++ cat /tmp/tmp.L66UpAoQdm command terminated with exit code 1 +++ rm /tmp/tmp.dvS7TIvDQU /tmp/tmp.L66UpAoQdm +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' +++ grep /node_id/2f9e1a1b-ac27-4fef-bf5b-28258628ed3c ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.IqIzJQEIPK ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.NiLyFaZAGX +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.IqIzJQEIPK +++++ cat /tmp/tmp.NiLyFaZAGX Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.IqIzJQEIPK +++++ cat /tmp/tmp.NiLyFaZAGX Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.IqIzJQEIPK +++++ cat /tmp/tmp.NiLyFaZAGX Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.IqIzJQEIPK +++++ cat /tmp/tmp.NiLyFaZAGX Error from server (NotFound): 
perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.IqIzJQEIPK /tmp/tmp.NiLyFaZAGX +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.fkpbTqodAK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Kltw8prj19 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.fkpbTqodAK +++ cat /tmp/tmp.Kltw8prj19 command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.fkpbTqodAK +++ cat /tmp/tmp.Kltw8prj19 command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.fkpbTqodAK +++ cat /tmp/tmp.Kltw8prj19 command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.fkpbTqodAK +++ cat /tmp/tmp.Kltw8prj19 command terminated with exit code 1 +++ rm /tmp/tmp.fkpbTqodAK /tmp/tmp.Kltw8prj19 +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/2c04f4e7-91d3-4ccf-8646-237abd69d285 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.o9RQXhwGQn ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.1xM6GzGKbD +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.o9RQXhwGQn +++++ cat /tmp/tmp.1xM6GzGKbD Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.o9RQXhwGQn +++++ cat /tmp/tmp.1xM6GzGKbD Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get 
psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.o9RQXhwGQn +++++ cat /tmp/tmp.1xM6GzGKbD Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.o9RQXhwGQn +++++ cat /tmp/tmp.1xM6GzGKbD Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.o9RQXhwGQn /tmp/tmp.1xM6GzGKbD +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.sEYn2mXnuH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1jcvB6wiVj +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.sEYn2mXnuH +++ cat /tmp/tmp.1jcvB6wiVj command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.sEYn2mXnuH +++ cat /tmp/tmp.1jcvB6wiVj command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.sEYn2mXnuH +++ cat /tmp/tmp.1jcvB6wiVj command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.sEYn2mXnuH +++ cat /tmp/tmp.1jcvB6wiVj command terminated with exit code 1 +++ rm /tmp/tmp.sEYn2mXnuH /tmp/tmp.1jcvB6wiVj +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' +++ grep /node_id/2765184e-1303-469d-9112-7a423f5d499a ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.xvMSahPoOa ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.veCyf3FLLU +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.xvMSahPoOa +++++ cat /tmp/tmp.veCyf3FLLU Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e 
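-----------------------------------------------------------------------------------
aside: get_service_ip as reconstructed from this trace
-----------------------------------------------------------------------------------
The recurring '[' '' '!=' true ']' / echo -n monitoring-service.monitoring-service-rs0 sequence is get_service_ip falling back to the in-cluster DNS name: it reads .spec.replsets[].expose.enabled from the psmdb resource, and since that lookup returns an empty string here (the trace queries psmdb/monitoring-service, which never exists in this run; the custom resource is named monitoring), the value is not "true" and the function prints "<service>.<service>-<server_type>". A sketch under that reading follows; the exposed-service branch is an assumption, since this log never reaches it.

    get_service_ip() {
        local service=$1
        local server_type=${2:-rs0}
        # not exposed (or CR missing): callers get the headless-service DNS name
        if [ "$(kubectl_bin get "psmdb/${service}" -o 'jsonpath={.spec.replsets[].expose.enabled}')" != 'true' ]; then
            echo -n "${service}.${service}-${server_type}"
            return
        fi
        # exposed case, not exercised in this log: presumably resolve the
        # service address instead (hypothetical jsonpath shown)
        kubectl_bin get "service/${service}" -o 'jsonpath={.spec.clusterIP}'
    }

Note that because the fallback name monitoring-service.monitoring-service-rs0 does not resolve once the cluster is gone, every subsequent pmm-admin call through it exits 1, which is exactly the failure pattern repeated above and below.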
+++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.xvMSahPoOa +++++ cat /tmp/tmp.veCyf3FLLU Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.xvMSahPoOa +++++ cat /tmp/tmp.veCyf3FLLU Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.xvMSahPoOa +++++ cat /tmp/tmp.veCyf3FLLU Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.xvMSahPoOa /tmp/tmp.veCyf3FLLU +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YvQ7rE2wdn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.OsaEqLH5sz +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.YvQ7rE2wdn +++ cat /tmp/tmp.OsaEqLH5sz command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.YvQ7rE2wdn +++ cat /tmp/tmp.OsaEqLH5sz command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.YvQ7rE2wdn +++ cat /tmp/tmp.OsaEqLH5sz command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.YvQ7rE2wdn +++ cat /tmp/tmp.OsaEqLH5sz command terminated with exit code 1 +++ rm /tmp/tmp.YvQ7rE2wdn /tmp/tmp.OsaEqLH5sz +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ grep /node_id/d3e1337e-146c-4616-bbcd-31b4b64e3574 ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.WoGEP6FOOR ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.ubp7QPHYy9 +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 
0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.WoGEP6FOOR +++++ cat /tmp/tmp.ubp7QPHYy9 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.WoGEP6FOOR +++++ cat /tmp/tmp.ubp7QPHYy9 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.WoGEP6FOOR +++++ cat /tmp/tmp.ubp7QPHYy9 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.WoGEP6FOOR +++++ cat /tmp/tmp.ubp7QPHYy9 Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.WoGEP6FOOR /tmp/tmp.ubp7QPHYy9 +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xWfAmCKg9c ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qpqZRC1H6e +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.xWfAmCKg9c +++ cat /tmp/tmp.qpqZRC1H6e command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.xWfAmCKg9c +++ cat /tmp/tmp.qpqZRC1H6e command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.xWfAmCKg9c +++ cat /tmp/tmp.qpqZRC1H6e command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.xWfAmCKg9c +++ cat /tmp/tmp.qpqZRC1H6e command terminated with exit code 1 +++ rm /tmp/tmp.xWfAmCKg9c /tmp/tmp.qpqZRC1H6e +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/d3452873-5768-431c-9b1b-491687b9cbb7 ++++ get_service_ip 
monitoring-service +++ awk '{print $4}' ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.vaMAGU2MrM ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.DJIo7WnWMr +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.vaMAGU2MrM +++++ cat /tmp/tmp.DJIo7WnWMr Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.vaMAGU2MrM +++++ cat /tmp/tmp.DJIo7WnWMr Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.vaMAGU2MrM +++++ cat /tmp/tmp.DJIo7WnWMr Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.vaMAGU2MrM +++++ cat /tmp/tmp.DJIo7WnWMr Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.vaMAGU2MrM /tmp/tmp.DJIo7WnWMr +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.drdgF0Sol2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.iuo8QPm5l9 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.drdgF0Sol2 +++ cat /tmp/tmp.iuo8QPm5l9 command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.drdgF0Sol2 +++ cat /tmp/tmp.iuo8QPm5l9 command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.drdgF0Sol2 +++ cat /tmp/tmp.iuo8QPm5l9 command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.drdgF0Sol2 +++ cat /tmp/tmp.iuo8QPm5l9 command terminated with exit code 1 +++ rm /tmp/tmp.drdgF0Sol2 /tmp/tmp.iuo8QPm5l9 
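-----------------------------------------------------------------------------------
aside: does_node_id_exists as traced above
-----------------------------------------------------------------------------------
The loop being traced takes the node IDs recorded while the cluster was running and asks the PMM server which of them still appear in its inventory. Every lookup here fails (the pmm-admin call exits 1 because the server URL no longer resolves), so nothing is appended and the function echoes an empty list, which is what the caller's [[ -n '' ]] check further below expects after teardown. The sketch is reassembled directly from the trace; ${namespace} is set by the surrounding test suite.

    does_node_id_exists() {
        local -a nodeList=("$@")
        local -a nodeList_from_pmm=()
        for node_id in "${nodeList[@]}"; do
            # list PMM's CONTAINER_NODE inventory and keep the 4th column
            # (the node_id) of any line matching the node being checked
            nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- \
                pmm-admin --server-url="https://admin:admin@$(get_service_ip monitoring-service)/" \
                --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE \
                | grep "$node_id" | awk '{print $4}'))
        done
        echo "${nodeList_from_pmm[@]}"
    }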
+++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/d3be8ad0-8962-4d9f-aec2-2dc01dd83a30 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.VUjW7uGXjB ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.N4JIFRM8FX +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.VUjW7uGXjB +++++ cat /tmp/tmp.N4JIFRM8FX Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.VUjW7uGXjB +++++ cat /tmp/tmp.N4JIFRM8FX Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.VUjW7uGXjB +++++ cat /tmp/tmp.N4JIFRM8FX Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.VUjW7uGXjB +++++ cat /tmp/tmp.N4JIFRM8FX Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.VUjW7uGXjB /tmp/tmp.N4JIFRM8FX +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.biH3J2HGCy ++++ mktemp +++ local LAST_ERR=/tmp/tmp.RrcoObZnxL +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.biH3J2HGCy +++ cat /tmp/tmp.RrcoObZnxL command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.biH3J2HGCy +++ cat /tmp/tmp.RrcoObZnxL command terminated with exit code 1 +++ sleep 4 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin 
--server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.biH3J2HGCy +++ cat /tmp/tmp.RrcoObZnxL command terminated with exit code 1 +++ sleep 8 +++ cat /tmp/tmp.biH3J2HGCy +++ cat /tmp/tmp.RrcoObZnxL command terminated with exit code 1 +++ rm /tmp/tmp.biH3J2HGCy /tmp/tmp.RrcoObZnxL +++ return 1 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/32268b1c-e43e-4fab-821a-23484986c7a0 ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ local server_type=rs0 +++++ kubectl_bin get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++ awk '{print $4}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.uXoC6Pb8MQ ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.v6Hv3hF9Pf +++++ local exit_status=0 +++++ local timeout=4 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.uXoC6Pb8MQ +++++ cat /tmp/tmp.v6Hv3hF9Pf Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 0 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.uXoC6Pb8MQ +++++ cat /tmp/tmp.v6Hv3hF9Pf Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 4 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get psmdb/monitoring-service -o 'jsonpath={.spec.replsets[].expose.enabled}' +++++ exit_status=1 +++++ set -e +++++ '[' 1 '!=' 0 -a -n 1 ']' +++++ cat /tmp/tmp.uXoC6Pb8MQ +++++ cat /tmp/tmp.v6Hv3hF9Pf Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ sleep 8 +++++ cat /tmp/tmp.uXoC6Pb8MQ +++++ cat /tmp/tmp.v6Hv3hF9Pf Error from server (NotFound): perconaservermongodbs.psmdb.percona.com "monitoring-service" not found +++++ rm /tmp/tmp.uXoC6Pb8MQ /tmp/tmp.v6Hv3hF9Pf +++++ return 1 ++++ '[' '' '!=' true ']' ++++ echo -n monitoring-service.monitoring-service-rs0 ++++ return +++ kubectl_bin exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.CSYQBso7AA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.k5YVI24mVq +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=1 +++ set -e +++ '[' 1 '!=' 0 -a -n 1 ']' +++ cat /tmp/tmp.CSYQBso7AA +++ cat /tmp/tmp.k5YVI24mVq command terminated with exit code 1 +++ sleep 0 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin 
--server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE
+++ exit_status=1
+++ set -e
+++ '[' 1 '!=' 0 -a -n 1 ']'
+++ cat /tmp/tmp.CSYQBso7AA
+++ cat /tmp/tmp.k5YVI24mVq
command terminated with exit code 1
+++ sleep 4
+++ for i in '$(seq 0 2)'
+++ set +e
+++ kubectl exec -n monitoring-2-0-21162 monitoring-0 -- pmm-admin --server-url=https://admin:admin@monitoring-service.monitoring-service-rs0/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE
+++ exit_status=1
+++ set -e
+++ '[' 1 '!=' 0 -a -n 1 ']'
+++ cat /tmp/tmp.CSYQBso7AA
+++ cat /tmp/tmp.k5YVI24mVq
command terminated with exit code 1
+++ sleep 8
+++ cat /tmp/tmp.CSYQBso7AA
+++ cat /tmp/tmp.k5YVI24mVq
command terminated with exit code 1
+++ rm /tmp/tmp.CSYQBso7AA /tmp/tmp.k5YVI24mVq
+++ return 1
++ echo
+ [[ -n '' ]]
++ kubectl_bin logs monitoring-rs0-0 pmm-client
++ grep -c 'cannot auto discover databases and collections'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.k0reHTsemE
+++ mktemp
++ local LAST_ERR=/tmp/tmp.vbDHaUe4Ow
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl logs monitoring-rs0-0 pmm-client
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.k0reHTsemE
++ cat /tmp/tmp.vbDHaUe4Ow
error: error from server (NotFound): pods "monitoring-rs0-0" not found in namespace "monitoring-2-0-21162"
++ sleep 0
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl logs monitoring-rs0-0 pmm-client
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.k0reHTsemE
++ cat /tmp/tmp.vbDHaUe4Ow
error: error from server (NotFound): pods "monitoring-rs0-0" not found in namespace "monitoring-2-0-21162"
++ sleep 4
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl logs monitoring-rs0-0 pmm-client
++ exit_status=1
++ set -e
++ '[' 1 '!=' 0 -a -n 1 ']'
++ cat /tmp/tmp.k0reHTsemE
++ cat /tmp/tmp.vbDHaUe4Ow
error: error from server (NotFound): pods "monitoring-rs0-0" not found in namespace "monitoring-2-0-21162"
++ sleep 8
++ cat /tmp/tmp.k0reHTsemE
++ cat /tmp/tmp.vbDHaUe4Ow
error: error from server (NotFound): pods "monitoring-rs0-0" not found in namespace "monitoring-2-0-21162"
++ rm /tmp/tmp.k0reHTsemE /tmp/tmp.vbDHaUe4Ow
++ return 1
+ [[ 0 != 0 ]]
+ desc 'check for passwords leak'
+ set +o xtrace
-----------------------------------------------------------------------------------
check for passwords leak
-----------------------------------------------------------------------------------
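The check_passwords_leak helper traced below works in three steps: pull every *_PASSWORD value out of the namespace's Secrets with jq, build a search list containing both the base64-decoded and the still-encoded form of each value, then save the log of every container of every pod to a file and grep it for each entry; any non-zero match count is treated as a leak. A condensed sketch using the names visible in the trace (skipping containers whose name matches pmm is an inference from the `[[ $c =~ pmm ]]` test, not confirmed by this log):

check_passwords_leak() {
    local secrets passwords pods p c pass count
    # every *_PASSWORD value from every Secret, still base64-encoded
    secrets=$(kubectl get secrets -o json \
        | jq -r '.items[].data | to_entries | .[] | select(.key | (contains("_PASSWORD"))) | .value')
    # search for the decoded and the encoded form alike
    passwords="$(for i in $secrets; do echo "$i" | base64 -d; echo; done) $secrets"
    pods=$(kubectl get pods -o name | awk -F / '{print $2}')
    for p in $pods; do
        for c in $(kubectl get pod "$p" -o 'jsonpath={.spec.containers[*].name}'); do
            [[ $c =~ pmm ]] && continue   # assumption: PMM sidecars are exempt
            kubectl logs "$p" -c "$c" >"/tmp/logs_output-$p-$c.txt"
            for pass in $passwords; do
                count=$(grep -c --fixed-strings -- "$pass" "/tmp/logs_output-$p-$c.txt") || :
                if [[ ${count:-0} != 0 ]]; then
                    echo "password '$pass' found in logs of $p/$c"
                    return 1
                fi
            done
        done
    done
}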
+ check_passwords_leak
+ local secrets
+ local passwords
+ local pods
++ jq -r '.items[].data | to_entries | .[] | select(.key | (contains("_PASSWORD"))) | .value'
++ kubectl_bin get secrets -o json
+++ mktemp
++ local LAST_OUT=/tmp/tmp.lvbZxgksZ1
+++ mktemp
++ local LAST_ERR=/tmp/tmp.Kh2QERBD5J
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get secrets -o json
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.lvbZxgksZ1
++ cat /tmp/tmp.Kh2QERBD5J
++ rm /tmp/tmp.lvbZxgksZ1 /tmp/tmp.Kh2QERBD5J
++ return 0
+ secrets='YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2'
+ echo secrets=YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2
secrets=YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2
++ for i in '$secrets'
++ base64 -d
++ echo
++ for i in '$secrets'
++ base64 -d
++ echo
++ for i in '$secrets'
++ base64 -d
++ echo
++ for i in '$secrets'
++ base64 -d
++ echo
++ for i in '$secrets'
++ base64 -d
++ echo
++ for i in '$secrets'
++ base64 -d
++ echo
++ for i in '$secrets'
++ base64 -d
++ echo
++ for i in '$secrets'
++ base64 -d
++ echo
++ for i in '$secrets'
++ base64 -d
++ echo
++ for i in '$secrets'
++ base64 -d
++ echo
+ passwords='backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2'
+ echo passwords=backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2
passwords=backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 backup123456 clusterAdmin123456 clusterMonitor123456 databaseAdmin123456 userAdmin123456 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2 YmFja3VwMTIzNDU2 Y2x1c3RlckFkbWluMTIzNDU2 Y2x1c3Rlck1vbml0b3IxMjM0NTY= ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== dXNlckFkbWluMTIzNDU2
++ kubectl_bin get pods -o name
++ awk -F / '{print $2}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.rxZmBCgdZb
+++ mktemp
++ local LAST_ERR=/tmp/tmp.CpakGYSUCa
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl get pods -o name
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.rxZmBCgdZb
++ cat /tmp/tmp.CpakGYSUCa
++ rm /tmp/tmp.rxZmBCgdZb /tmp/tmp.CpakGYSUCa
++ return 0
+ pods='monitoring-0 psmdb-client-6f7f47c4b-pcdjc'
+ echo pods=monitoring-0 psmdb-client-6f7f47c4b-pcdjc
pods=monitoring-0 psmdb-client-6f7f47c4b-pcdjc
+ collect_logs monitoring-2-0-21162
+ local containers
+ local count
+ NS=monitoring-2-0-21162
+ for p in '$pods'
++ kubectl_bin -n monitoring-2-0-21162 get pod monitoring-0 -o 'jsonpath={.spec.containers[*].name}'
+++ mktemp
++ local LAST_OUT=/tmp/tmp.jyrhAMBJu5
+++ mktemp
++ local LAST_ERR=/tmp/tmp.q2H7Jqbx5H
++ local exit_status=0
++ local timeout=4
+++ seq 0 2
++ for i in '$(seq 0 2)'
++ set +e
++ kubectl -n monitoring-2-0-21162 get pod monitoring-0 -o 'jsonpath={.spec.containers[*].name}'
++ exit_status=0
++ set -e
++ '[' 0 '!=' 0 -a -n 1 ']'
++ break
++ cat /tmp/tmp.jyrhAMBJu5
++ cat /tmp/tmp.q2H7Jqbx5H
++ rm /tmp/tmp.jyrhAMBJu5 /tmp/tmp.q2H7Jqbx5H
++ return 0
+ containers=monitoring
+ for c in '$containers'
+ [[ monitoring =~ pmm ]]
+ kubectl_bin -n monitoring-2-0-21162 logs monitoring-0 -c monitoring
++ mktemp
+ local
LAST_OUT=/tmp/tmp.nHB3taIpKP ++ mktemp + local LAST_ERR=/tmp/tmp.cUwHxisXQg + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n monitoring-2-0-21162 logs monitoring-0 -c monitoring + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.nHB3taIpKP + cat /tmp/tmp.cUwHxisXQg + rm /tmp/tmp.nHB3taIpKP /tmp/tmp.cUwHxisXQg + return 0 + echo logs saved in: /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt logs saved in: /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + 
[[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.JgvImX9by8/logs_output-monitoring-0-monitoring.txt ++ : + count=0 + [[ 0 != 0 ]] + echo + for p in '$pods' ++ kubectl_bin -n monitoring-2-0-21162 get pod psmdb-client-6f7f47c4b-pcdjc -o 'jsonpath={.spec.containers[*].name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pRGaXEJWfx +++ mktemp ++ local LAST_ERR=/tmp/tmp.cWZYNGZWuc ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl -n monitoring-2-0-21162 get pod psmdb-client-6f7f47c4b-pcdjc -o 'jsonpath={.spec.containers[*].name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.pRGaXEJWfx ++ cat /tmp/tmp.cWZYNGZWuc ++ rm /tmp/tmp.pRGaXEJWfx /tmp/tmp.cWZYNGZWuc ++ return 0 + containers=psmdb-client + for c in '$containers' + [[ psmdb-client =~ pmm ]] + kubectl_bin -n monitoring-2-0-21162 logs psmdb-client-6f7f47c4b-pcdjc -c psmdb-client ++ mktemp + local LAST_OUT=/tmp/tmp.1fjvKTUiXQ ++ mktemp + local LAST_ERR=/tmp/tmp.mE5lucBkps + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n monitoring-2-0-21162 logs psmdb-client-6f7f47c4b-pcdjc -c psmdb-client + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.1fjvKTUiXQ + cat /tmp/tmp.mE5lucBkps + rm /tmp/tmp.1fjvKTUiXQ /tmp/tmp.mE5lucBkps + return 0 + echo logs saved in: /tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt logs saved in: /tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 
/tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.JgvImX9by8/logs_output-psmdb-client-6f7f47c4b-pcdjc-psmdb-client.txt ++ : + count=0 + [[ 0 != 0 ]] + echo + '[' -n psmdb-operator ']' ++ kubectl_bin -n psmdb-operator get pods -o name +++ mktemp ++ local LAST_OUT=/tmp/tmp.ujOgV1kRXa ++ awk -F / '{print $2}' +++ mktemp ++ local LAST_ERR=/tmp/tmp.iosmxvVyII ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl -n psmdb-operator get pods -o name ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ujOgV1kRXa ++ cat /tmp/tmp.iosmxvVyII ++ rm /tmp/tmp.ujOgV1kRXa /tmp/tmp.iosmxvVyII ++ return 0 + pods=percona-server-mongodb-operator-bdc5b774b-wb586 + collect_logs psmdb-operator + local containers + local count + NS=psmdb-operator + for p in '$pods' ++ kubectl_bin -n psmdb-operator get pod percona-server-mongodb-operator-bdc5b774b-wb586 -o 'jsonpath={.spec.containers[*].name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NYJ0xQo26u +++ mktemp ++ local LAST_ERR=/tmp/tmp.itlbkzRX8w ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl -n psmdb-operator get pod percona-server-mongodb-operator-bdc5b774b-wb586 -o 'jsonpath={.spec.containers[*].name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.NYJ0xQo26u ++ cat /tmp/tmp.itlbkzRX8w ++ rm /tmp/tmp.NYJ0xQo26u /tmp/tmp.itlbkzRX8w ++ return 0 + containers=percona-server-mongodb-operator + for c in '$containers' + [[ percona-server-mongodb-operator =~ pmm ]] + kubectl_bin -n psmdb-operator logs percona-server-mongodb-operator-bdc5b774b-wb586 -c percona-server-mongodb-operator ++ mktemp + local 
LAST_OUT=/tmp/tmp.9O3BM4mBkt ++ mktemp + local LAST_ERR=/tmp/tmp.crEVFwcdgQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n psmdb-operator logs percona-server-mongodb-operator-bdc5b774b-wb586 -c percona-server-mongodb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.9O3BM4mBkt + cat /tmp/tmp.crEVFwcdgQ + rm /tmp/tmp.9O3BM4mBkt /tmp/tmp.crEVFwcdgQ + return 0 + echo logs saved in: /tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt logs saved in: /tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- backup123456 /tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterAdmin123456 /tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- clusterMonitor123456 /tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- databaseAdmin123456 /tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- userAdmin123456 /tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt ++ : + count=0 + [[ 0 != 0 ]] + for pass in '$passwords' ++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= 
/tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- YmFja3VwMTIzNDU2 /tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- Y2x1c3RlckFkbWluMTIzNDU2 /tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- Y2x1c3Rlck1vbml0b3IxMjM0NTY= /tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- ZGF0YWJhc2VBZG1pbjEyMzQ1Ng== /tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ for pass in '$passwords'
++ grep -c --fixed-strings -- dXNlckFkbWluMTIzNDU2 /tmp/tmp.JgvImX9by8/logs_output-percona-server-mongodb-operator-bdc5b774b-wb586-percona-server-mongodb-operator.txt
++ :
+ count=0
+ [[ 0 != 0 ]]
+ echo
+ helm uninstall monitoring
release "monitoring" uninstalled
+ destroy monitoring-2-0-21162
+ local namespace=monitoring-2-0-21162
+ local ignore_logs=true
+ desc 'destroy cluster/operator and all other resources'
+ set +o xtrace
-----------------------------------------------------------------------------------
destroy cluster/operator and all other resources
-----------------------------------------------------------------------------------
+ '[' true == false ']'
+ delete_crd
+ desc 'get and delete old CRDs and RBAC'
+ set +o xtrace
-----------------------------------------------------------------------------------
get and delete old CRDs and RBAC
-----------------------------------------------------------------------------------
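delete_crd, traced below, runs in three phases: delete the CRDs from deploy/crd.yaml without waiting, then for every remaining custom resource of each CRD patch metadata.finalizers to an empty list so that no finalizer can block deletion, and finally wait for each CRD to disappear. A sketch of the finalizer-stripping loop, following the xargs idiom in the trace (the `:` after each failed patch is how the script tolerates the resource type already being gone, as happens here):

# one pass per CRD name in deploy/crd.yaml; $0/$1 are the NAMESPACE/NAME
# columns that xargs feeds to the inline sh script
for crd_name in $(yq eval '.metadata.name' "${src_dir}/deploy/crd.yaml" | grep -v '\-\-\-'); do
    kubectl get "$crd_name" --all-namespaces -o wide \
        | grep -v NAMESPACE \
        | xargs -L 1 sh -xc 'kubectl patch '"$crd_name"' -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' \
        || :   # tolerated: the server may no longer know the resource type
    kubectl wait --for=delete crd "$crd_name"
done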
"${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.hACpPhSS1Y ++ mktemp + local LAST_ERR=/tmp/tmp.wrKe0ckOTk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.hACpPhSS1Y + cat /tmp/tmp.wrKe0ckOTk + rm /tmp/tmp.hACpPhSS1Y /tmp/tmp.wrKe0ckOTk + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.WiSWjm63X9 ++ mktemp + local LAST_ERR=/tmp/tmp.jM5ixlJtPv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WiSWjm63X9 + cat /tmp/tmp.jM5ixlJtPv + rm /tmp/tmp.WiSWjm63X9 /tmp/tmp.jM5ixlJtPv + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.BJnlSJqOE9 ++ mktemp + local LAST_ERR=/tmp/tmp.AKWKyt7P2A + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BJnlSJqOE9 + cat /tmp/tmp.AKWKyt7P2A + rm /tmp/tmp.BJnlSJqOE9 /tmp/tmp.AKWKyt7P2A + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local 
++ mktemp
+ local LAST_ERR=/tmp/tmp.naiZNFJ09N
+ local exit_status=0
+ local timeout=4
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1612/deploy/cw-rbac.yaml --ignore-not-found
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 -a -n 1 ']'
+ break
+ cat /tmp/tmp.ulFNxSOwjD
clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted
clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted
+ cat /tmp/tmp.naiZNFJ09N
+ rm /tmp/tmp.ulFNxSOwjD /tmp/tmp.naiZNFJ09N
+ return 0
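destroy_cert_manager, below, feeds the upstream v1.15.1 manifest back to kubectl delete. The first pass removes nearly everything, so when the retry wrapper reacts to the non-zero exit status, the second and third passes print a NotFound error for every object in the manifest; that wall of errors is expected noise from an already-clean cluster, not a failed teardown. A hypothetical quieter variant (not what the suite runs) would pass --ignore-not-found, as the crd.yaml and cw-rbac.yaml deletions above already do:

# hypothetical variant: NotFound becomes a no-op instead of a retry storm
destroy_cert_manager() {
    kubectl delete --ignore-not-found \
        -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml
}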
"cert-manager-controller-approve:cert-manager-io" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" deleted clusterrolebinding.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" deleted role.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted role.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted role.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" deleted rolebinding.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" deleted rolebinding.rbac.authorization.k8s.io "cert-manager:leaderelection" deleted rolebinding.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" deleted service "cert-manager" deleted deployment.apps "cert-manager-cainjector" deleted deployment.apps "cert-manager" deleted deployment.apps "cert-manager-webhook" deleted mutatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted validatingwebhookconfiguration.admissionregistration.k8s.io "cert-manager-webhook" deleted + cat /tmp/tmp.z9vlXDc43c Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager-webhook" not found + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.NsZYERH26z namespace "cert-manager" deleted + cat /tmp/tmp.z9vlXDc43c Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server 
(NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.NsZYERH26z + cat /tmp/tmp.z9vlXDc43c Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": 
clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server 
(NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + sleep 8 + cat /tmp/tmp.NsZYERH26z + cat /tmp/tmp.z9vlXDc43c Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": namespaces "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificaterequests.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "certificates.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "challenges.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "clusterissuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "issuers.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": customresourcedefinitions.apiextensions.k8s.io "orders.acme.cert-manager.io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": serviceaccounts "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting 
"https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-cluster-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-view" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-edit" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterroles.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-issuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-clusterissuers" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificates" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": 
clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-orders" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-challenges" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-ingress-shim" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-approve:cert-manager-io" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-controller-certificatesigningrequests" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": clusterrolebindings.rbac.authorization.k8s.io "cert-manager-webhook:subjectaccessreviews" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": roles.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-cainjector:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager:leaderelection" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": rolebindings.rbac.authorization.k8s.io "cert-manager-webhook:dynamic-serving" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": services "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager-cainjector" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps "cert-manager" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": deployments.apps 
"cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": mutatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found Error from server (NotFound): error when deleting "https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml": validatingwebhookconfigurations.admissionregistration.k8s.io "cert-manager-webhook" not found + rm /tmp/tmp.NsZYERH26z /tmp/tmp.z9vlXDc43c + return 1 + true + '[' -n '' ']' + '[' -n psmdb-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace monitoring-2-0-21162 + rm -rf /tmp/tmp.JgvImX9by8 + kubectl_bin delete --grace-period=0 --force=true namespace psmdb-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.4tpOukWEx3 + desc 'test passed' + set +o xtrace + local LAST_OUT=/tmp/tmp.ODi44oPx1s ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.Y63hUGOUFK + local exit_status=0 + local timeout=4 + local LAST_ERR=/tmp/tmp.3IipFRKlAj + local exit_status=0 + local timeout=4 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + for i in '$(seq 0 2)' + set +e + set +e + kubectl delete --grace-period=0 --force=true namespace psmdb-operator + kubectl delete --grace-period=0 --force=true namespace monitoring-2-0-21162