Log: /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/logs/monitoring-2-0.log WARNING: version difference between client (1.30) and server (1.27) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.27) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.27) exceeds the supported minor version skew of +/-1 + create_infra monitoring-2-0-4042 + local ns=monitoring-2-0-4042 + delete_crd + desc 'get and delete old CRDs and RBAC' + set +o xtrace ----------------------------------------------------------------------------------- get and delete old CRDs and RBAC ----------------------------------------------------------------------------------- + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/deploy/crd.yaml --ignore-not-found --wait=false ++ mktemp + local LAST_OUT=/tmp/tmp.GszhFWDGQ6 ++ mktemp + local LAST_ERR=/tmp/tmp.HHEZ65bv98 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/deploy/crd.yaml --ignore-not-found --wait=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.GszhFWDGQ6 customresourcedefinition.apiextensions.k8s.io "perconaservermongodbbackups.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbrestores.psmdb.percona.com" deleted customresourcedefinition.apiextensions.k8s.io "perconaservermongodbs.psmdb.percona.com" deleted + cat /tmp/tmp.HHEZ65bv98 + rm /tmp/tmp.GszhFWDGQ6 /tmp/tmp.HHEZ65bv98 + return 0 ++ grep -v '\-\-\-' ++ yq eval .metadata.name /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/deploy/crd.yaml + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + grep -v NAMESPACE + kubectl get perconaservermongodbbackups.psmdb.percona.com --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbbackups.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbbackups" + kubectl patch perconaservermongodbbackups.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbbackups" + : + kubectl_bin wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.E6vnul1svO ++ mktemp + local LAST_ERR=/tmp/tmp.gyfMIOE3Yf + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbbackups.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.E6vnul1svO + cat /tmp/tmp.gyfMIOE3Yf + rm /tmp/tmp.E6vnul1svO /tmp/tmp.gyfMIOE3Yf + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbrestores.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbrestores.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbrestores" + kubectl patch perconaservermongodbrestores.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type 
"perconaservermongodbrestores" + : + kubectl_bin wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.mvU3hiAYSQ ++ mktemp + local LAST_ERR=/tmp/tmp.OsnjVfck6E + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbrestores.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.mvU3hiAYSQ + cat /tmp/tmp.OsnjVfck6E + rm /tmp/tmp.mvU3hiAYSQ /tmp/tmp.OsnjVfck6E + return 0 + for crd_name in '$(yq eval '\''.metadata.name'\'' "${src_dir}/deploy/crd.yaml" | grep -v '\''\-\-\-'\'')' + kubectl get perconaservermongodbs.psmdb.percona.com --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch perconaservermongodbs.psmdb.percona.com -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' error: the server doesn't have a resource type "perconaservermongodbs" + kubectl patch perconaservermongodbs.psmdb.percona.com -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: the server doesn't have a resource type "perconaservermongodbs" + : + kubectl_bin wait --for=delete crd perconaservermongodbs.psmdb.percona.com ++ mktemp + local LAST_OUT=/tmp/tmp.DLa55cvdZO ++ mktemp + local LAST_ERR=/tmp/tmp.ZCAOv8Ew1i + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete crd perconaservermongodbs.psmdb.percona.com + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.DLa55cvdZO + cat /tmp/tmp.ZCAOv8Ew1i + rm /tmp/tmp.DLa55cvdZO /tmp/tmp.ZCAOv8Ew1i + return 0 + local rbac_yaml=rbac.yaml + '[' -n psmdb-operator ']' + rbac_yaml=cw-rbac.yaml + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/deploy/cw-rbac.yaml --ignore-not-found ++ mktemp + local LAST_OUT=/tmp/tmp.RIlcTuQY1Y ++ mktemp + local LAST_ERR=/tmp/tmp.9ywoDYJjKZ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/deploy/cw-rbac.yaml --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RIlcTuQY1Y clusterrole.rbac.authorization.k8s.io "percona-server-mongodb-operator" deleted clusterrolebinding.rbac.authorization.k8s.io "service-account-percona-server-mongodb-operator" deleted + cat /tmp/tmp.9ywoDYJjKZ + rm /tmp/tmp.RIlcTuQY1Y /tmp/tmp.9ywoDYJjKZ + return 0 + check_crd_for_deletion PR-1583-58b42b0e + local git_tag=PR-1583-58b42b0e ++ curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/PR-1583-58b42b0e/deploy/crd.yaml ++ yq eval .metadata.name ++ /usr/bin/sed ':a;N;$!ba;s/\n/ /g' ++ /usr/bin/sed s/---//g + for crd_name in '$(curl -s https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/crd.yaml | yq eval '\''.metadata.name'\'' | $sed '\''s/---//g'\'' | $sed '\'':a;N;$!ba;s/\n/ /g'\'')' ++ kubectl_bin get crd/null -o 'jsonpath={.status.conditions[-1].type}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MYvtGytti8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.DoWSe4KHpV ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.MYvtGytti8 ++ cat /tmp/tmp.DoWSe4KHpV Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 0 ++ for i in '$(seq 0 2)' ++ set +e 
++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.MYvtGytti8 ++ cat /tmp/tmp.DoWSe4KHpV Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 4 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get crd/null -o 'jsonpath={.status.conditions[-1].type}' ++ exit_status=1 ++ set -e ++ '[' 1 '!=' 0 -a -n 1 ']' ++ cat /tmp/tmp.MYvtGytti8 ++ cat /tmp/tmp.DoWSe4KHpV Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ sleep 8 ++ cat /tmp/tmp.MYvtGytti8 ++ cat /tmp/tmp.DoWSe4KHpV Error from server (NotFound): customresourcedefinitions.apiextensions.k8s.io "null" not found ++ rm /tmp/tmp.MYvtGytti8 /tmp/tmp.DoWSe4KHpV ++ return 1 + [[ '' == \T\e\r\m\i\n\a\t\i\n\g ]] + '[' -n psmdb-operator ']' + create_namespace psmdb-operator + local namespace=psmdb-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// ++ tail -n1 + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl api-resources ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace psmdb-operator --ignore-not-found + xargs kubectl delete ns + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.Fg3eclKuwF ++ mktemp + local LAST_OUT=/tmp/tmp.LIBSjpd16o ++ mktemp + local LAST_ERR=/tmp/tmp.bbWb8UFf20 + local exit_status=0 + local timeout=4 ++ seq 0 2 + 
local LAST_ERR=/tmp/tmp.kZcFV6cO8X + local exit_status=0 + local timeout=4 + for i in '$(seq 0 2)' + set +e + kubectl get ns ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace psmdb-operator --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Fg3eclKuwF + cat /tmp/tmp.bbWb8UFf20 + rm /tmp/tmp.Fg3eclKuwF /tmp/tmp.bbWb8UFf20 + return 0 namespace "cert-manager" deleted namespace "gmp-public" deleted namespace "gmp-system" deleted namespace "monitoring-2-0-1932" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.LIBSjpd16o namespace "psmdb-operator" deleted + cat /tmp/tmp.kZcFV6cO8X + rm /tmp/tmp.LIBSjpd16o /tmp/tmp.kZcFV6cO8X + return 0 + kubectl_bin wait --for=delete namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.m2Kk3pODXS ++ mktemp + local LAST_ERR=/tmp/tmp.AXIzVLApZX + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.m2Kk3pODXS + cat /tmp/tmp.AXIzVLApZX + rm /tmp/tmp.m2Kk3pODXS /tmp/tmp.AXIzVLApZX + return 0 + desc 'create namespace psmdb-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace psmdb-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.rL4cbmf2sU ++ mktemp + local LAST_ERR=/tmp/tmp.GSBD5SX8Pv + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.rL4cbmf2sU namespace/psmdb-operator created + cat /tmp/tmp.GSBD5SX8Pv + rm /tmp/tmp.rL4cbmf2sU /tmp/tmp.GSBD5SX8Pv + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.SqlRScobi8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.vwujbvwTmi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.SqlRScobi8 ++ cat /tmp/tmp.vwujbvwTmi ++ rm /tmp/tmp.SqlRScobi8 /tmp/tmp.vwujbvwTmi ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1583-58b42b0e-9-cluster1 --namespace=psmdb-operator ++ mktemp + local LAST_OUT=/tmp/tmp.BEzpJQ7Kv2 ++ mktemp + local LAST_ERR=/tmp/tmp.EsKNEsaqON + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1583-58b42b0e-9-cluster1 --namespace=psmdb-operator + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BEzpJQ7Kv2 Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1583-58b42b0e-9-cluster1" modified. 
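
[annotation] Namespace recycling follows a fixed shape in these tests: sweep away namespaces left by earlier runs, delete and await the target namespace, recreate it, then pin the kubeconfig context to it. A sketch of that flow under the names seen in the trace (the bodies are inferred; the real helper runs the stale-namespace sweep and the targeted delete concurrently, which is why "namespace cert-manager deleted" and friends interleave with the psmdb-operator delete above):

    # Sketch: recreate a test namespace from a clean slate.
    create_namespace() {
        local namespace=$1
        # delete leftover namespaces, keeping system/infra ones; ^NAME drops the header row
        kubectl get ns \
            | egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' \
            | awk '{print $1}' \
            | xargs kubectl delete ns
        kubectl delete namespace "$namespace" --ignore-not-found
        kubectl wait --for=delete namespace "$namespace"
        kubectl create namespace "$namespace"
        # all subsequent kubectl calls default to the fresh namespace
        kubectl config set-context "$(kubectl config current-context)" --namespace="$namespace"
    }
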
+ cat /tmp/tmp.EsKNEsaqON + rm /tmp/tmp.BEzpJQ7Kv2 /tmp/tmp.EsKNEsaqON + return 0 + deploy_operator + desc 'start PSMDB operator' + set +o xtrace ----------------------------------------------------------------------------------- start PSMDB operator ----------------------------------------------------------------------------------- + local cr_file + '[' -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/conf/crd.yaml ']' + cr_file=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/deploy/crd.yaml + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.17qCp44xAj ++ mktemp + local LAST_ERR=/tmp/tmp.ABZBP0YyU3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.17qCp44xAj customresourcedefinition.apiextensions.k8s.io/perconaservermongodbbackups.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbrestores.psmdb.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaservermongodbs.psmdb.percona.com serverside-applied + cat /tmp/tmp.ABZBP0YyU3 + rm /tmp/tmp.17qCp44xAj /tmp/tmp.ABZBP0YyU3 + return 0 + '[' -n psmdb-operator ']' + apply_rbac cw-rbac + local operator_namespace=psmdb-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: psmdb-operator^' + kubectl_bin apply -n psmdb-operator -f - ++ mktemp + local LAST_OUT=/tmp/tmp.ScMdKq1sih ++ mktemp + local LAST_ERR=/tmp/tmp.l27jxY4bo6 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -n psmdb-operator -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ScMdKq1sih clusterrole.rbac.authorization.k8s.io/percona-server-mongodb-operator created serviceaccount/percona-server-mongodb-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-server-mongodb-operator created + cat /tmp/tmp.l27jxY4bo6 + rm /tmp/tmp.ScMdKq1sih /tmp/tmp.l27jxY4bo6 + return 0 + kubectl_bin apply -f - + yq eval ' (.spec.template.spec.containers[].image = "perconalab/percona-server-mongodb-operator:PR-1583-58b42b0e") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | ((.. 
| select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.ISJpgshRKj ++ mktemp + local LAST_ERR=/tmp/tmp.TLwmx4whfy + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.ISJpgshRKj deployment.apps/percona-server-mongodb-operator created + cat /tmp/tmp.TLwmx4whfy + rm /tmp/tmp.ISJpgshRKj /tmp/tmp.TLwmx4whfy + return 0 + sleep 2 ++ get_operator_pod ++ kubectl_bin get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.mzeQtaqJGY +++ mktemp ++ local LAST_ERR=/tmp/tmp.hLMlgxJlSR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=percona-server-mongodb-operator -o 'jsonpath={.items[].metadata.name}' -n psmdb-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.mzeQtaqJGY ++ cat /tmp/tmp.hLMlgxJlSR ++ rm /tmp/tmp.mzeQtaqJGY /tmp/tmp.hLMlgxJlSR ++ return 0 + wait_pod percona-server-mongodb-operator-58b9b5f5f8-jwv2s + local pod=percona-server-mongodb-operator-58b9b5f5f8-jwv2s + set +o xtrace waiting for pod/percona-server-mongodb-operator-58b9b5f5f8-jwv2s to be ready.OK + create_namespace monitoring-2-0-4042 + local namespace=monitoring-2-0-4042 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' ++ helm list --all-namespaces --filter chaos-mesh + local chaos_mesh_ns= + desc 'destroy chaos-mesh' + set +o xtrace ----------------------------------------------------------------------------------- destroy chaos-mesh ----------------------------------------------------------------------------------- + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep validate-auth ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh.org ++ kubectl get crd + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces monitoring-2-0-4042' + set +o xtrace 
----------------------------------------------------------------------------------- cleaned up old namespaces monitoring-2-0-4042 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace monitoring-2-0-4042 --ignore-not-found + xargs kubectl delete ns + egrep -v '^kube-|^default|Terminating|psmdb-operator|openshift|gke-mcs|^NAME' + kubectl_bin get ns ++ mktemp + local LAST_OUT=/tmp/tmp.MYopNK1ILI ++ mktemp + awk '{print$1}' + local LAST_OUT=/tmp/tmp.YyOuKqR4FH ++ mktemp + local LAST_ERR=/tmp/tmp.VODZoOFsJ3 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns ++ mktemp + local LAST_ERR=/tmp/tmp.KMMqnRoQRV + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-2-0-4042 --ignore-not-found + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MYopNK1ILI + cat /tmp/tmp.VODZoOFsJ3 + rm /tmp/tmp.MYopNK1ILI /tmp/tmp.VODZoOFsJ3 + return 0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.YyOuKqR4FH + cat /tmp/tmp.KMMqnRoQRV + rm /tmp/tmp.YyOuKqR4FH /tmp/tmp.KMMqnRoQRV + return 0 + kubectl_bin wait --for=delete namespace monitoring-2-0-4042 ++ mktemp + local LAST_OUT=/tmp/tmp.zmSldtEp59 ++ mktemp + local LAST_ERR=/tmp/tmp.QoJSIk4xFh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=delete namespace monitoring-2-0-4042 namespace "gmp-public" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.zmSldtEp59 + cat /tmp/tmp.QoJSIk4xFh + rm /tmp/tmp.zmSldtEp59 /tmp/tmp.QoJSIk4xFh + return 0 + desc 'create namespace monitoring-2-0-4042' + set +o xtrace ----------------------------------------------------------------------------------- create namespace monitoring-2-0-4042 ----------------------------------------------------------------------------------- + kubectl_bin create namespace monitoring-2-0-4042 ++ mktemp + local LAST_OUT=/tmp/tmp.sX0rOnoeD8 ++ mktemp + local LAST_ERR=/tmp/tmp.3roU8bOOgR + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace monitoring-2-0-4042 namespace "gmp-system" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.sX0rOnoeD8 namespace/monitoring-2-0-4042 created + cat /tmp/tmp.3roU8bOOgR + rm /tmp/tmp.sX0rOnoeD8 /tmp/tmp.3roU8bOOgR + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.r14GqESVFj +++ mktemp ++ local LAST_ERR=/tmp/tmp.oSg5g6ArKe ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.r14GqESVFj ++ cat /tmp/tmp.oSg5g6ArKe ++ rm /tmp/tmp.r14GqESVFj /tmp/tmp.oSg5g6ArKe ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1583-58b42b0e-9-cluster1 --namespace=monitoring-2-0-4042 ++ mktemp + local LAST_OUT=/tmp/tmp.vkC2GVDzvu ++ mktemp + local LAST_ERR=/tmp/tmp.WBzBJOGqq8 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-psmdb-1583-58b42b0e-9-cluster1 --namespace=monitoring-2-0-4042 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.vkC2GVDzvu Context "gke_cloud-dev-112233_us-central1-a_jen-psmdb-1583-58b42b0e-9-cluster1" modified. 
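
[annotation] Both create_namespace calls begin by tearing down chaos-mesh leftovers, and on this clean cluster that produces a burst of expected errors: each command substitution below expands to nothing, kubectl delete is handed a resource kind with no names, and the resulting "error: resource(s) were provided, but no name was specified" is swallowed by "|| :" (the lone "+ :" lines in the trace). A sketch consistent with the traced commands; the helm uninstall branch is inferred from the "'[' -n '' ']'" guard and never fires here:

    # Sketch: best-effort removal of chaos-mesh webhooks, CRDs and RBAC.
    destroy_chaos_mesh() {
        local chaos_mesh_ns
        chaos_mesh_ns=$(helm list --all-namespaces --filter chaos-mesh \
            | sed 's/NAMESPACE//' | awk -F' ' '{print $2}' | tail -n1)
        [ -n "$chaos_mesh_ns" ] && helm uninstall chaos-mesh --namespace "$chaos_mesh_ns" || :
        timeout 30 kubectl delete MutatingWebhookConfiguration \
            $(kubectl get MutatingWebhookConfiguration | grep chaos-mesh | awk '{print $1}') || :
        timeout 30 kubectl delete ValidatingWebhookConfiguration \
            $(kubectl get ValidatingWebhookConfiguration | grep chaos-mesh | awk '{print $1}') || :
        timeout 30 kubectl delete ValidatingWebhookConfiguration \
            $(kubectl get ValidatingWebhookConfiguration | grep validate-auth | awk '{print $1}') || :
        timeout 30 kubectl delete crd \
            $(kubectl get crd | grep chaos-mesh.org | awk '{print $1}') || :
        timeout 30 kubectl delete clusterrolebinding \
            $(kubectl get clusterrolebinding | grep chaos-mesh | awk '{print $1}') || :
        timeout 30 kubectl delete clusterrole \
            $(kubectl get clusterrole | grep chaos-mesh | awk '{print $1}') || :
    }

The command substitutions are deliberately unquoted so that multiple matching names word-split into separate arguments.
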
+ cat /tmp/tmp.WBzBJOGqq8 + rm /tmp/tmp.vkC2GVDzvu /tmp/tmp.WBzBJOGqq8 + return 0 + deploy_cert_manager + desc 'deploy cert manager' + set +o xtrace ----------------------------------------------------------------------------------- deploy cert manager ----------------------------------------------------------------------------------- + kubectl_bin create namespace cert-manager ++ mktemp + local LAST_OUT=/tmp/tmp.RFk5CG8Qkc ++ mktemp + local LAST_ERR=/tmp/tmp.gBouVIYb0h + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace cert-manager + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.RFk5CG8Qkc namespace/cert-manager created + cat /tmp/tmp.gBouVIYb0h + rm /tmp/tmp.RFk5CG8Qkc /tmp/tmp.gBouVIYb0h + return 0 + kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true ++ mktemp + local LAST_OUT=/tmp/tmp.raQkpHHWNt ++ mktemp + local LAST_ERR=/tmp/tmp.3a983XV8S5 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.raQkpHHWNt namespace/cert-manager labeled + cat /tmp/tmp.3a983XV8S5 + rm /tmp/tmp.raQkpHHWNt /tmp/tmp.3a983XV8S5 + return 0 + kubectl_bin apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml --validate=false ++ mktemp + local LAST_OUT=/tmp/tmp.dDsZ2IcRU8 ++ mktemp + local LAST_ERR=/tmp/tmp.BbtY8qbUrT + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml --validate=false + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.dDsZ2IcRU8 namespace/cert-manager configured customresourcedefinition.apiextensions.k8s.io/certificaterequests.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/certificates.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/challenges.acme.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/clusterissuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/issuers.cert-manager.io unchanged customresourcedefinition.apiextensions.k8s.io/orders.acme.cert-manager.io unchanged serviceaccount/cert-manager-cainjector created serviceaccount/cert-manager created serviceaccount/cert-manager-webhook created clusterrole.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-cluster-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-view unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-edit unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrole.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged 
clusterrole.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-cainjector unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-issuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-clusterissuers unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificates unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-orders unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-challenges unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-ingress-shim unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-approve:cert-manager-io unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-controller-certificatesigningrequests unchanged clusterrolebinding.rbac.authorization.k8s.io/cert-manager-webhook:subjectaccessreviews configured role.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager:leaderelection unchanged role.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created rolebinding.rbac.authorization.k8s.io/cert-manager-cainjector:leaderelection unchanged rolebinding.rbac.authorization.k8s.io/cert-manager:leaderelection configured rolebinding.rbac.authorization.k8s.io/cert-manager-webhook:dynamic-serving created service/cert-manager created service/cert-manager-webhook created deployment.apps/cert-manager-cainjector created deployment.apps/cert-manager created deployment.apps/cert-manager-webhook created mutatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured validatingwebhookconfiguration.admissionregistration.k8s.io/cert-manager-webhook configured + cat /tmp/tmp.BbtY8qbUrT Warning: resource namespaces/cert-manager is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
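
[annotation] The cert-manager deployment above is a four-step recipe: create and label the namespace so cert-manager's own webhook cannot block its bootstrap, apply the pinned upstream manifest with --validate=false (the webhook is not up yet), then wait for the three deployments' pods. A sketch, assuming the sleep that follows in the trace belongs to the same helper:

    # Sketch: install cert-manager v1.15.1 and wait for it to come up.
    deploy_cert_manager() {
        kubectl create namespace cert-manager
        kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true
        kubectl apply --validate=false \
            -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.1/cert-manager.yaml
        kubectl -n cert-manager wait pod \
            -l app.kubernetes.io/instance=cert-manager --for=condition=ready
        sleep 120   # settle time before PMM Server goes in, per the trace
    }

The "missing last-applied-configuration annotation" warning on stderr is the usual consequence of kubectl apply adopting a namespace that was created imperatively; it is patched automatically and is harmless here. Just below, the PMM Server chart is installed through a generic "retry 10 60 helm install ..." helper; only its successful first attempt is traced, so the internals of this sketch are an assumption:

    # Sketch: retry a command up to $max times, $delay seconds apart.
    retry() {
        local max=$1 delay=$2 n=1
        shift 2
        until "$@"; do
            if [ "$n" -ge "$max" ]; then
                echo "command '$*' failed after $n attempts" >&2
                return 1
            fi
            n=$((n + 1))
            sleep "$delay"
        done
    }
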
+ rm /tmp/tmp.dDsZ2IcRU8 /tmp/tmp.BbtY8qbUrT + return 0 + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready ++ mktemp + local LAST_OUT=/tmp/tmp.enPnUfojt3 ++ mktemp + local LAST_ERR=/tmp/tmp.uOnvfaE0j7 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.enPnUfojt3 pod/cert-manager-bd44d64d-s5dss condition met pod/cert-manager-cainjector-7dcddbd8b9-jkbrm condition met pod/cert-manager-webhook-6cc8fdfd7-68sp2 condition met + cat /tmp/tmp.uOnvfaE0j7 + rm /tmp/tmp.enPnUfojt3 /tmp/tmp.uOnvfaE0j7 + return 0 + sleep 120 + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + deploy_pmm_server + helm uninstall monitoring Error: uninstall: Release not loaded: monitoring: release: not found + : + helm repo remove stable "stable" has been removed from your repositories + helm repo add stable https://charts.helm.sh/stable "stable" has been added to your repositories + [[ -n '' ]] + retry 10 60 helm install monitoring --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz + local max=10 + local delay=60 + shift 2 + local n=1 + helm install monitoring --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz NAME: monitoring LAST DEPLOYED: Wed Jul 10 11:44:23 2024 NAMESPACE: monitoring-2-0-4042 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster: endpoint: https://monitoring-service.monitoring-2-0-4042.svc.cluster.local:443 login: admin password: admin + sleep 20 + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.q8lqkw940U ++ mktemp + local LAST_ERR=/tmp/tmp.GgQPAJCmqQ + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.q8lqkw940U + cat /tmp/tmp.GgQPAJCmqQ error: unable to upgrade connection: container not found ("monitoring") + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.q8lqkw940U + cat /tmp/tmp.GgQPAJCmqQ error: unable to upgrade connection: container not found ("monitoring") + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.q8lqkw940U + cat /tmp/tmp.GgQPAJCmqQ error: unable to upgrade connection: container not found ("monitoring") + sleep 8 + cat /tmp/tmp.q8lqkw940U + cat /tmp/tmp.GgQPAJCmqQ error: unable to upgrade connection: container not found ("monitoring") + rm /tmp/tmp.q8lqkw940U /tmp/tmp.GgQPAJCmqQ + return 1 + echo 'Retry 0' Retry 0 + sleep 5 + let retry+=1 + '[' 1 -ge 20 ']' + kubectl_bin exec monitoring-0 -- bash -c 'ls -l 
/proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.TaRrVWyewH ++ mktemp + local LAST_ERR=/tmp/tmp.VWz23yjaXr + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.TaRrVWyewH + cat /tmp/tmp.VWz23yjaXr error: unable to upgrade connection: container not found ("monitoring") + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.TaRrVWyewH + cat /tmp/tmp.VWz23yjaXr error: unable to upgrade connection: container not found ("monitoring") + sleep 4 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=1 + set -e + '[' 1 '!=' 0 -a -n 1 ']' + cat /tmp/tmp.TaRrVWyewH + cat /tmp/tmp.VWz23yjaXr command terminated with exit code 1 + sleep 8 + cat /tmp/tmp.TaRrVWyewH + cat /tmp/tmp.VWz23yjaXr command terminated with exit code 1 + rm /tmp/tmp.TaRrVWyewH /tmp/tmp.VWz23yjaXr + return 1 + echo 'Retry 1' Retry 1 + sleep 5 + let retry+=1 + '[' 2 -ge 20 ']' + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.q7AmPf00h8 ++ mktemp + local LAST_ERR=/tmp/tmp.rEeyYbys2j + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.q7AmPf00h8 + cat /tmp/tmp.rEeyYbys2j + rm /tmp/tmp.q7AmPf00h8 /tmp/tmp.rEeyYbys2j + return 0 + cluster=monitoring + desc 'create secrets and start client' + set +o xtrace ----------------------------------------------------------------------------------- create secrets and start client ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.2P38o5Faja ++ mktemp + local LAST_ERR=/tmp/tmp.aMq0gds5T2 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/conf/secrets.yml -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.2P38o5Faja secret/some-users created secret/some-users unchanged + cat /tmp/tmp.aMq0gds5T2 + rm /tmp/tmp.2P38o5Faja /tmp/tmp.aMq0gds5T2 + return 0 + yq '.spec.template.spec.volumes[0].secret.secretName="monitoring-ssl"' /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/conf/client_with_tls.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.BuVjBoVqcm ++ mktemp + local LAST_ERR=/tmp/tmp.uVezpYkdES + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.BuVjBoVqcm deployment.apps/psmdb-client created + cat /tmp/tmp.uVezpYkdES + rm /tmp/tmp.BuVjBoVqcm /tmp/tmp.uVezpYkdES + return 0 + sleep 90 + desc 'create 
first PSMDB cluster monitoring' + set +o xtrace ----------------------------------------------------------------------------------- create first PSMDB cluster monitoring ----------------------------------------------------------------------------------- + apply_cluster /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml + yq eval '(.spec | select(.image == null)).image = "perconalab/percona-server-mongodb-operator:main-mongod7.0"' + yq eval '(.spec | select(has("backup"))).backup.image = "perconalab/percona-server-mongodb-operator:main-backup"' + yq eval '(.spec | select(has("pmm"))).pmm.image = "perconalab/pmm-client:dev-latest"' + yq eval '(.spec | select(has("initImage"))).initImage = "perconalab/percona-server-mongodb-operator:PR-1583-58b42b0e"' + cat /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/conf/monitoring-rs0.yml ++ mktemp + local LAST_OUT=/tmp/tmp.oGsp8el9tE ++ mktemp + local LAST_ERR=/tmp/tmp.cKA1lmYljh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + yq eval '.spec.upgradeOptions.apply="Never"' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.oGsp8el9tE perconaservermongodb.psmdb.percona.com/monitoring created + cat /tmp/tmp.cKA1lmYljh + rm /tmp/tmp.oGsp8el9tE /tmp/tmp.cKA1lmYljh + return 0 + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready...........OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready.............OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.P1CTeHX7e6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.TPZ6IgK9VF ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.P1CTeHX7e6 ++ cat /tmp/tmp.TPZ6IgK9VF ++ rm /tmp/tmp.P1CTeHX7e6 /tmp/tmp.TPZ6IgK9VF ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready..............OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.uHVQOwm28h +++ mktemp ++ local LAST_ERR=/tmp/tmp.Zel2jXtjHd ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.uHVQOwm28h ++ cat /tmp/tmp.Zel2jXtjHd ++ rm /tmp/tmp.uHVQOwm28h /tmp/tmp.Zel2jXtjHd ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster 
readyness..................... + desc 'check if pmm-client container is not enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container is not enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-rs0 -no-pmm + local resource=statefulset/monitoring-rs0 + local postfix=-no-pmm + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml + local new_result=/tmp/tmp.j2BpCesSt5/statefulset_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-4042", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.WU1GBCSlg8 ++ mktemp + local LAST_ERR=/tmp/tmp.hTgddla9Zh + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.WU1GBCSlg8 + cat /tmp/tmp.hTgddla9Zh + rm /tmp/tmp.WU1GBCSlg8 /tmp/tmp.hTgddla9Zh + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.j2BpCesSt5/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.j2BpCesSt5/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.j2BpCesSt5/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-no-pmm.yml /tmp/tmp.j2BpCesSt5/statefulset_monitoring-rs0.yml + sleep 10 + run_mongos 'db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-4042 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})' + local uri=userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-4042 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FEwgbS4D92 +++ mktemp ++ local LAST_ERR=/tmp/tmp.jwrube6PMR ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FEwgbS4D92 ++ cat /tmp/tmp.jwrube6PMR ++ rm /tmp/tmp.FEwgbS4D92 /tmp/tmp.jwrube6PMR ++ return 0 + local client_container=psmdb-client-6f7f47c4b-v5mgk + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-6f7f47c4b-v5mgk -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-4042.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.8mU1iFEnpo ++ mktemp + local LAST_ERR=/tmp/tmp.r2EZ3KEo9q + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6f7f47c4b-v5mgk -- bash -c 'printf '\''db.createUser({user:"myApp",pwd:"myPass",roles:[{db:"myApp",role:"readWrite"}]})\n'\'' | mongo mongodb://userAdmin:userAdmin123456@monitoring-mongos.monitoring-2-0-4042.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.8mU1iFEnpo Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-4042.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-07-10T11:49:36.159Z"},"s":"I", "c":"NETWORK", 
"id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("fce1ce75-9790-4821-8cd2-aadedfdc5e01") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match Successfully added user: { "user" : "myApp", "roles" : [ { "db" : "myApp", "role" : "readWrite" } ] } bye + cat /tmp/tmp.r2EZ3KEo9q + rm /tmp/tmp.8mU1iFEnpo /tmp/tmp.r2EZ3KEo9q + return 0 + run_mongos 'sh.enableSharding("myApp")' clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-4042 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=sh.enableSharding("myApp")' + local uri=clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-4042 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.k7j20FR7eS +++ mktemp ++ local LAST_ERR=/tmp/tmp.nNPSE2Wn3d ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.k7j20FR7eS ++ cat /tmp/tmp.nNPSE2Wn3d ++ rm /tmp/tmp.k7j20FR7eS /tmp/tmp.nNPSE2Wn3d ++ return 0 + local client_container=psmdb-client-6f7f47c4b-v5mgk + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-6f7f47c4b-v5mgk -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-4042.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.Y6rEwZWbf6 ++ mktemp + local LAST_ERR=/tmp/tmp.AHruPjHkBs + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6f7f47c4b-v5mgk -- bash -c 'printf '\''sh.enableSharding("myApp")\n'\'' | mongo mongodb://clusterAdmin:clusterAdmin123456@monitoring-mongos.monitoring-2-0-4042.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Y6rEwZWbf6 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-4042.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-07-10T11:49:38.636Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("c236702b-9f60-44b0-bd09-1fba3f3d0038") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match { "ok" : 1, "$clusterTime" : { "clusterTime" : Timestamp(1720612178, 9), "signature" : { "hash" : BinData(0,"16UPRh1XgZbAn3jdJyjAZq/2gl8="), "keyId" : NumberLong("7389972685716979718") } }, "operationTime" : Timestamp(1720612178, 3) } bye + cat /tmp/tmp.AHruPjHkBs + rm /tmp/tmp.Y6rEwZWbf6 /tmp/tmp.AHruPjHkBs + return 0 + insert_data_mongos 100500 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local data=100500 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + run_mongos 'use myApp\n db.test.insert({ x: 100500 })' 
myApp:myPass@monitoring-mongos.monitoring-2-0-4042 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=use myApp\n db.test.insert({ x: 100500 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-4042 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.W5v2PSh9wr +++ mktemp ++ local LAST_ERR=/tmp/tmp.3lDYtswwrA ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.W5v2PSh9wr ++ cat /tmp/tmp.3lDYtswwrA ++ rm /tmp/tmp.W5v2PSh9wr /tmp/tmp.3lDYtswwrA ++ return 0 + local client_container=psmdb-client-6f7f47c4b-v5mgk + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-6f7f47c4b-v5mgk -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-4042.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.Gl4RlCH5mJ ++ mktemp + local LAST_ERR=/tmp/tmp.scA78K9Bka + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6f7f47c4b-v5mgk -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100500 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-4042.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Gl4RlCH5mJ Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-4042.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-07-10T11:49:41.249Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("520f92a3-0321-4ab0-ba3a-2fc0d32c0d71") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.scA78K9Bka + rm /tmp/tmp.Gl4RlCH5mJ /tmp/tmp.scA78K9Bka + return 0 + insert_data_mongos 100600 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local data=100600 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + run_mongos 'use myApp\n db.test.insert({ x: 100600 })' myApp:myPass@monitoring-mongos.monitoring-2-0-4042 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=use myApp\n db.test.insert({ x: 100600 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-4042 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TN9NuhgNkx +++ mktemp ++ local LAST_ERR=/tmp/tmp.84h9NNI5zi ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 
'!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.TN9NuhgNkx ++ cat /tmp/tmp.84h9NNI5zi ++ rm /tmp/tmp.TN9NuhgNkx /tmp/tmp.84h9NNI5zi ++ return 0 + local client_container=psmdb-client-6f7f47c4b-v5mgk + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-6f7f47c4b-v5mgk -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-4042.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.AFccdqnBW8 ++ mktemp + local LAST_ERR=/tmp/tmp.fa1gC5kvU9 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6f7f47c4b-v5mgk -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100600 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-4042.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.AFccdqnBW8 Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-4042.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-07-10T11:49:44.532Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("a228d5a0-4109-48dd-9752-585479ee453c") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.fa1gC5kvU9 + rm /tmp/tmp.AFccdqnBW8 /tmp/tmp.fa1gC5kvU9 + return 0 + insert_data_mongos 100700 myApp '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local data=100700 + local db_name=myApp + local 'flags=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + run_mongos 'use myApp\n db.test.insert({ x: 100700 })' myApp:myPass@monitoring-mongos.monitoring-2-0-4042 '' '' '--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + local 'command=use myApp\n db.test.insert({ x: 100700 })' + local uri=myApp:myPass@monitoring-mongos.monitoring-2-0-4042 + local driver=mongodb + local suffix=.svc.cluster.local ++ kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.rPmj9YgIbb +++ mktemp ++ local LAST_ERR=/tmp/tmp.HXC5ulkujM ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.rPmj9YgIbb ++ cat /tmp/tmp.HXC5ulkujM ++ rm /tmp/tmp.rPmj9YgIbb /tmp/tmp.HXC5ulkujM ++ return 0 + local client_container=psmdb-client-6f7f47c4b-v5mgk + local 'mongo_flag=--tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + kubectl_bin exec psmdb-client-6f7f47c4b-v5mgk -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-4042.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' ++ mktemp + local LAST_OUT=/tmp/tmp.qOAebReuTY ++ mktemp + local LAST_ERR=/tmp/tmp.yjRMCakfAn + local exit_status=0 + local timeout=4 ++ 
seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec psmdb-client-6f7f47c4b-v5mgk -- bash -c 'printf '\''use myApp\n db.test.insert({ x: 100700 })\n'\'' | mongo mongodb://myApp:myPass@monitoring-mongos.monitoring-2-0-4042.svc.cluster.local/admin --tlsCertificateKeyFile /tmp/tls.pem --tlsCAFile /etc/mongodb-ssl/ca.crt --tls' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.qOAebReuTY Percona Server for MongoDB shell version v4.4.29-28 connecting to: mongodb://monitoring-mongos.monitoring-2-0-4042.svc.cluster.local:27017/admin?compressors=disabled&gssapiServiceName=mongodb {"t":{"$date":"2024-07-10T11:49:47.724Z"},"s":"I", "c":"NETWORK", "id":5490002, "ctx":"thread1","msg":"Started a new thread for the timer service"} Implicit session: session { "id" : UUID("6a1326e7-af9c-4e1b-9e99-29c9b7806d4b") } Percona Server for MongoDB server version: v7.0.11-6 WARNING: shell and server versions do not match switched to db myApp WriteResult({ "nInserted" : 1 }) bye + cat /tmp/tmp.yjRMCakfAn + rm /tmp/tmp.qOAebReuTY /tmp/tmp.yjRMCakfAn + return 0 + desc 'add PMM_SERVER_API_KEY for secret some-users' + set +o xtrace ----------------------------------------------------------------------------------- add PMM_SERVER_API_KEY for secret some-users ----------------------------------------------------------------------------------- ++ jq .key +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ kubectl_bin get service/monitoring-service -o json ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Bwl4FlzJ6Z +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.FRqAZZToRm ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.Bwl4FlzJ6Z ++++ cat /tmp/tmp.FRqAZZToRm ++++ rm /tmp/tmp.Bwl4FlzJ6Z /tmp/tmp.FRqAZZToRm ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.XrXnca22VY +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.N0VTdFtD3F ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.XrXnca22VY ++++ cat /tmp/tmp.N0VTdFtD3F ++++ rm /tmp/tmp.XrXnca22VY /tmp/tmp.N0VTdFtD3F ++++ return 0 +++ local ip=34.70.86.31 +++ '[' -n 34.70.86.31 -a 34.70.86.31 '!=' null ']' +++ echo 34.70.86.31 +++ return ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@34.70.86.31/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 155 100 119 100 36 250 75 --:--:-- --:--:-- --:--:-- 326 100 155 100 119 100 36 250 75 --:--:-- --:--:-- --:--:-- 326 + API_KEY='"eyJrIjoiRFhHUTJzdTJPcWNwN2FydnJrVGhtWEJkTWgxSjd4OFkiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="' + kubectl_bin patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoiRFhHUTJzdTJPcWNwN2FydnJrVGhtWEJkTWgxSjd4OFkiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' ++ mktemp + local LAST_OUT=/tmp/tmp.k4VPqLyYBY ++ 
+ desc 'add PMM_SERVER_API_KEY for secret some-users' + set +o xtrace ----------------------------------------------------------------------------------- add PMM_SERVER_API_KEY for secret some-users ----------------------------------------------------------------------------------- ++ jq .key +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ kubectl_bin get service/monitoring-service -o json ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Bwl4FlzJ6Z +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.FRqAZZToRm ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.Bwl4FlzJ6Z ++++ cat /tmp/tmp.FRqAZZToRm ++++ rm /tmp/tmp.Bwl4FlzJ6Z /tmp/tmp.FRqAZZToRm ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.XrXnca22VY +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.N0VTdFtD3F ++++ local exit_status=0 ++++ local timeout=4 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 -a -n 1 ']' ++++ break ++++ cat /tmp/tmp.XrXnca22VY ++++ cat /tmp/tmp.N0VTdFtD3F ++++ rm /tmp/tmp.XrXnca22VY /tmp/tmp.N0VTdFtD3F ++++ return 0 +++ local ip=34.70.86.31 +++ '[' -n 34.70.86.31 -a 34.70.86.31 '!=' null ']' +++ echo 34.70.86.31 +++ return ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@34.70.86.31/graph/api/auth/keys + API_KEY='"eyJrIjoiRFhHUTJzdTJPcWNwN2FydnJrVGhtWEJkTWgxSjd4OFkiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="' + kubectl_bin patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoiRFhHUTJzdTJPcWNwN2FydnJrVGhtWEJkTWgxSjd4OFkiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' ++ mktemp + local LAST_OUT=/tmp/tmp.k4VPqLyYBY ++ mktemp + local LAST_ERR=/tmp/tmp.93dHOj6I10 + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret some-users --type merge --patch '{"stringData": {"PMM_SERVER_API_KEY": "eyJrIjoiRFhHUTJzdTJPcWNwN2FydnJrVGhtWEJkTWgxSjd4OFkiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.k4VPqLyYBY secret/some-users patched + cat /tmp/tmp.93dHOj6I10 + rm /tmp/tmp.k4VPqLyYBY /tmp/tmp.93dHOj6I10 + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- + wait_for_running monitoring-rs0 3 + local name=monitoring-rs0 + let last_pod=2 + local check_cluster_readyness=true + set_debug + [[ 1 == 1 ]] + set -o xtrace + local rs_name=rs0 + local cluster_name=monitoring ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + [[ 0 -eq 2 ]] + wait_pod monitoring-rs0-0 + local pod=monitoring-rs0-0 + set +o xtrace waiting for pod/monitoring-rs0-0 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 1 -eq 2 ]] + wait_pod monitoring-rs0-1 + local pod=monitoring-rs0-1 + set +o xtrace waiting for pod/monitoring-rs0-1 to be ready.OK + for i in '$(seq 0 $last_pod)' + [[ 2 -eq 2 ]] ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ITJwKJz7Eo +++ mktemp ++ local LAST_ERR=/tmp/tmp.FIlEJbLEy1 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].arbiter.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.ITJwKJz7Eo ++ cat /tmp/tmp.FIlEJbLEy1 ++ rm /tmp/tmp.ITJwKJz7Eo /tmp/tmp.FIlEJbLEy1 ++ return 0 + [[ '' == \t\r\u\e ]] + wait_pod monitoring-rs0-2 + local pod=monitoring-rs0-2 + set +o xtrace waiting for pod/monitoring-rs0-2 to be ready.OK ++ kubectl_bin get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.FhnL5cc9fv +++ mktemp ++ local LAST_ERR=/tmp/tmp.9gxJS1rKD4 ++ local exit_status=0 ++ local timeout=4 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get psmdb monitoring -o 'jsonpath={.spec.replsets[?(@.name=="rs0")].non_voting.enabled}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 -a -n 1 ']' ++ break ++ cat /tmp/tmp.FhnL5cc9fv ++ cat /tmp/tmp.9gxJS1rKD4 ++ rm /tmp/tmp.FhnL5cc9fv /tmp/tmp.9gxJS1rKD4 ++ return 0 + [[ '' == \t\r\u\e ]] + sleep 10 + [[ true == \t\r\u\e ]] + set +x Waiting for cluster readiness.............................................................................................
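Nearly every command above and below runs through the suite's kubectl_bin wrapper, which is what produces the recurring mktemp / LAST_OUT / LAST_ERR / 'seq 0 2' noise in this trace: each kubectl invocation gets up to three attempts, with stdout and stderr captured to temp files and replayed afterwards. A sketch reconstructed from the xtrace output (the exact retry test and sleep behavior are inferred, not copied from e2e-tests/functions):

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 timeout=4
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" -eq 0 ]; then
                break                    # success: stop retrying
            fi
            sleep "$i"                   # failed attempts pause briefly ("sleep 0" is visible earlier in the log)
        done
        cat "$LAST_OUT"                  # replay captured output so callers can parse it
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }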
+ sleep 90 + desc 'check if pmm-client container enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-rs0 + local resource=statefulset/monitoring-rs0 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml + local new_result=/tmp/tmp.j2BpCesSt5/statefulset_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-4042", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. 
== "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.M9eJmgG2HL ++ mktemp + local LAST_ERR=/tmp/tmp.Sz8LzRiSbd + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.M9eJmgG2HL + cat /tmp/tmp.Sz8LzRiSbd + rm /tmp/tmp.M9eJmgG2HL /tmp/tmp.Sz8LzRiSbd + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.j2BpCesSt5/statefulset_monitoring-rs0.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.j2BpCesSt5/statefulset_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.j2BpCesSt5/statefulset_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-rs0.yml /tmp/tmp.j2BpCesSt5/statefulset_monitoring-rs0.yml + compare_kubectl service/monitoring-rs0 + local resource=service/monitoring-rs0 + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml + local new_result=/tmp/tmp.j2BpCesSt5/service_monitoring-rs0.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-rs0 + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. 
| select(tag == "!!str")) |= sub("monitoring-2-0-4042", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.0VTl8VIU5y ++ mktemp + local LAST_ERR=/tmp/tmp.J6iaPBl2Rj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/monitoring-rs0 + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.0VTl8VIU5y + cat /tmp/tmp.J6iaPBl2Rj + rm /tmp/tmp.0VTl8VIU5y /tmp/tmp.J6iaPBl2Rj + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.j2BpCesSt5/service_monitoring-rs0.yml + version_gt 1.22 ++ bc -l ++ echo '1.27 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.j2BpCesSt5/service_monitoring-rs0.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.j2BpCesSt5/service_monitoring-rs0.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/service_monitoring-rs0.yml /tmp/tmp.j2BpCesSt5/service_monitoring-rs0.yml + compare_kubectl service/monitoring-mongos + local resource=service/monitoring-mongos + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml + local new_result=/tmp/tmp.j2BpCesSt5/service_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml service/monitoring-mongos ++ mktemp + local LAST_OUT=/tmp/tmp.MskLJNDRyI ++ mktemp + local LAST_ERR=/tmp/tmp.1PPSeZnbai + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml service/monitoring-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. 
| select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-4042", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.MskLJNDRyI + cat /tmp/tmp.1PPSeZnbai + rm /tmp/tmp.MskLJNDRyI /tmp/tmp.1PPSeZnbai + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.j2BpCesSt5/service_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.j2BpCesSt5/service_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.j2BpCesSt5/service_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/service_monitoring-mongos.yml /tmp/tmp.j2BpCesSt5/service_monitoring-mongos.yml + compare_kubectl statefulset/monitoring-cfg + local resource=statefulset/monitoring-cfg + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml + local new_result=/tmp/tmp.j2BpCesSt5/statefulset_monitoring-cfg.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-cfg + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. 
| select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-4042", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.agda7Y1hrM ++ mktemp + local LAST_ERR=/tmp/tmp.8He6Bkxgdj + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-cfg + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.agda7Y1hrM + cat /tmp/tmp.8He6Bkxgdj + rm /tmp/tmp.agda7Y1hrM /tmp/tmp.8He6Bkxgdj + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.j2BpCesSt5/statefulset_monitoring-cfg.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.j2BpCesSt5/statefulset_monitoring-cfg.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.j2BpCesSt5/statefulset_monitoring-cfg.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-cfg.yml /tmp/tmp.j2BpCesSt5/statefulset_monitoring-cfg.yml + compare_kubectl statefulset/monitoring-mongos + local resource=statefulset/monitoring-mongos + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml + local new_result=/tmp/tmp.j2BpCesSt5/statefulset_monitoring-mongos.yml + '[' -n '' -a -f /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos-oc.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-mongos + yq eval ' del(.metadata.ownerReferences[].apiVersion) | del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "NAMESPACE")) | del(.metadata.selfLink) | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. 
| select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.status) | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-4042", "NAME_SPACE") | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - ++ mktemp + local LAST_OUT=/tmp/tmp.Fe2lS7NCGF ++ mktemp + local LAST_ERR=/tmp/tmp.QicEMRsumk + local exit_status=0 + local timeout=4 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-mongos + exit_status=0 + set -e + '[' 0 '!=' 0 -a -n 1 ']' + break + cat /tmp/tmp.Fe2lS7NCGF + cat /tmp/tmp.QicEMRsumk + rm /tmp/tmp.Fe2lS7NCGF /tmp/tmp.QicEMRsumk + return 0 + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' /tmp/tmp.j2BpCesSt5/statefulset_monitoring-mongos.yml + version_gt 1.22 ++ echo '1.27 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + yq -i eval 'del(.spec.internalTrafficPolicy)' /tmp/tmp.j2BpCesSt5/statefulset_monitoring-mongos.yml + yq -i eval 'del(.spec.allocateLoadBalancerNodePorts)' /tmp/tmp.j2BpCesSt5/statefulset_monitoring-mongos.yml + [[ /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml == */cronjob* ]] + diff -u /mnt/jenkins/workspace/cloud-psmdb-operator_PR-1583/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-mongos.yml /tmp/tmp.j2BpCesSt5/statefulset_monitoring-mongos.yml + desc 'check mongod metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongod metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-4042-monitoring-rs0-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-4042-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1720612422 ++ /usr/bin/date -u +%s + local end=1720612482 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ sed -e 's/^"//; s/"$//;' +++ jq '.status.loadBalancer.ingress[].hostname' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TdOpRbInAu ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2LNK44FUL0 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.TdOpRbInAu +++ cat /tmp/tmp.2LNK44FUL0 +++ rm 
/tmp/tmp.TdOpRbInAu /tmp/tmp.2LNK44FUL0 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.uvqerUA3d2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MNoZKarUp2 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.uvqerUA3d2 +++ cat /tmp/tmp.MNoZKarUp2 +++ rm /tmp/tmp.uvqerUA3d2 /tmp/tmp.MNoZKarUp2 +++ return 0 ++ local ip=34.70.86.31 ++ '[' -n 34.70.86.31 -a 34.70.86.31 '!=' null ']' ++ echo 34.70.86.31 ++ return + local endpoint=34.70.86.31 + jq '.data.result[0].values[][1]' + curl -s -k 'https://admin:admin@34.70.86.31/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-4042-monitoring-rs0-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-4042-monitoring-rs0-1%22%7D%29&start=1720612422&end=1720612482&step=60' + grep '^"[0-9]' "1720610347" "1720610347" + get_metric_values mongodb_connections monitoring-2-0-4042-monitoring-rs0-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-4042-monitoring-rs0-1 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1720612424 ++ /usr/bin/date -u +%s + local end=1720612484 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.hZ9U1ryMLa ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_ERR=/tmp/tmp.omGF6Hj0gH +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.hZ9U1ryMLa +++ cat /tmp/tmp.omGF6Hj0gH +++ rm /tmp/tmp.hZ9U1ryMLa /tmp/tmp.omGF6Hj0gH +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ jq '.status.loadBalancer.ingress[].ip' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.4Gq7eps6MI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.b8F3o8vf5F +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.4Gq7eps6MI +++ cat /tmp/tmp.b8F3o8vf5F +++ rm /tmp/tmp.4Gq7eps6MI /tmp/tmp.b8F3o8vf5F +++ return 0 ++ local ip=34.70.86.31 ++ '[' -n 34.70.86.31 -a 34.70.86.31 '!=' null ']' ++ echo 34.70.86.31 ++ return + local endpoint=34.70.86.31 + jq '.data.result[0].values[][1]' + grep '^"[0-9]' + curl -s -k 'https://admin:admin@34.70.86.31/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-4042-monitoring-rs0-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-4042-monitoring-rs0-1%22%7D%29&start=1720612424&end=1720612484&step=60' "0" "0" + desc 'check mongo config metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongo config metrics 
----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-4042-monitoring-cfg-1 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-4042-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1720612426 ++ /usr/bin/date -u +%s + local end=1720612486 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ jq '.status.loadBalancer.ingress[].hostname' +++ local LAST_OUT=/tmp/tmp.cON2ULTmgP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mbgvPZEnbN +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.cON2ULTmgP +++ cat /tmp/tmp.mbgvPZEnbN +++ rm /tmp/tmp.cON2ULTmgP /tmp/tmp.mbgvPZEnbN +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pqZPw2Z6ey ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5NTLUqkIeS +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.pqZPw2Z6ey +++ cat /tmp/tmp.5NTLUqkIeS +++ rm /tmp/tmp.pqZPw2Z6ey /tmp/tmp.5NTLUqkIeS +++ return 0 ++ local ip=34.70.86.31 ++ '[' -n 34.70.86.31 -a 34.70.86.31 '!=' null ']' ++ echo 34.70.86.31 ++ return + local endpoint=34.70.86.31 + jq '.data.result[0].values[][1]' + grep '^"[0-9]' + curl -s -k 'https://admin:admin@34.70.86.31/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-4042-monitoring-cfg-1%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-4042-monitoring-cfg-1%22%7D%29&start=1720612426&end=1720612486&step=60' "1720610346" "1720610346" + get_metric_values mongodb_connections monitoring-2-0-4042-monitoring-cfg-1 admin:admin + local metric=mongodb_connections + local instance=monitoring-2-0-4042-monitoring-cfg-1 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1720612428 ++ /usr/bin/date -u +%s + local end=1720612488 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.A8QIU5bat5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.quuUnmflaz +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.A8QIU5bat5 +++ cat /tmp/tmp.quuUnmflaz +++ rm /tmp/tmp.A8QIU5bat5 /tmp/tmp.quuUnmflaz +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.MqMDB5wRzX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.4nyB3Xb9EG +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set 
+e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.MqMDB5wRzX +++ cat /tmp/tmp.4nyB3Xb9EG +++ rm /tmp/tmp.MqMDB5wRzX /tmp/tmp.4nyB3Xb9EG +++ return 0 ++ local ip=34.70.86.31 ++ '[' -n 34.70.86.31 -a 34.70.86.31 '!=' null ']' ++ echo 34.70.86.31 ++ return + local endpoint=34.70.86.31 + jq '.data.result[0].values[][1]' + curl -s -k 'https://admin:admin@34.70.86.31/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-4042-monitoring-cfg-1%22%7d%20or%20mongodb_connections%7Bnode_name%3D%7E%22monitoring-2-0-4042-monitoring-cfg-1%22%7D%29&start=1720612428&end=1720612488&step=60' + grep '^"[0-9]' "0" "0" + desc 'check mongos metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mongos metrics ----------------------------------------------------------------------------------- ++ kubectl get pod -l app.kubernetes.io/component=mongos -o 'jsonpath={.items[0].metadata.name}' + MONGOS_POD_NAME=monitoring-mongos-0 + get_metric_values node_boot_time_seconds monitoring-2-0-4042-monitoring-mongos-0 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-4042-monitoring-mongos-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1720612431 ++ /usr/bin/date -u +%s + local end=1720612491 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pRSbbjfVK7 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ZExkRbV9wc +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.pRSbbjfVK7 +++ cat /tmp/tmp.ZExkRbV9wc +++ rm /tmp/tmp.pRSbbjfVK7 /tmp/tmp.ZExkRbV9wc +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.hEtAdC2EG1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.EMlPXG4EB8 +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.hEtAdC2EG1 +++ cat /tmp/tmp.EMlPXG4EB8 +++ rm /tmp/tmp.hEtAdC2EG1 /tmp/tmp.EMlPXG4EB8 +++ return 0 ++ local ip=34.70.86.31 ++ '[' -n 34.70.86.31 -a 34.70.86.31 '!=' null ']' ++ echo 34.70.86.31 ++ return + local endpoint=34.70.86.31 + jq '.data.result[0].values[][1]' + grep '^"[0-9]' + curl -s -k 'https://admin:admin@34.70.86.31/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-4042-monitoring-mongos-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-4042-monitoring-mongos-0%22%7D%29&start=1720612431&end=1720612491&step=60' "1720610346" "1720610346"
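The mongod, cfg, and mongos metric checks above share one mechanism: resolve the LoadBalancer address of monitoring-service, query PMM's Prometheus through the Grafana datasource proxy over a one-minute query_range window, and require at least one numeric sample back. Roughly, with the query URL-encoded the way the trace shows it (the duplicated or-joined selector from the helper is collapsed to a single selector here):

    get_metric_values() {
        local metric="$1" instance="$2" user_pass="$3"
        local start end endpoint
        start=$(/usr/bin/date -u +%s -d '-1 minute')
        end=$(/usr/bin/date -u +%s)
        endpoint=$(kubectl get service/monitoring-service \
            -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
        # query=min(<metric>{node_name=~"<instance>"}); no numeric sample means the exporter is not reporting
        curl -s -k "https://${user_pass}@${endpoint}/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28${metric}%7Bnode_name%3D%7E%22${instance}%22%7D%29&start=${start}&end=${end}&step=60" \
            | jq '.data.result[0].values[][1]' \
            | grep '^"[0-9]'
    }

    # usage, matching the trace above:
    get_metric_values node_boot_time_seconds monitoring-2-0-4042-monitoring-rs0-1 admin:admin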
+ sleep 90 + desc 'check QAN data' + set +o xtrace ----------------------------------------------------------------------------------- check QAN data ----------------------------------------------------------------------------------- + get_qan_values mongodb dev-mongod admin:admin + local service_type=mongodb + local environment=dev-mongod + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2024-07-09T23:56:23+00:00 ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2024-07-10T11:56:23+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.cPP0DgOYpH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ojq5qe3BZe +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.cPP0DgOYpH +++ cat /tmp/tmp.ojq5qe3BZe +++ rm /tmp/tmp.cPP0DgOYpH /tmp/tmp.ojq5qe3BZe +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TkDq1AnHnG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JzCCSiOAmJ +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.TkDq1AnHnG +++ cat /tmp/tmp.JzCCSiOAmJ +++ rm /tmp/tmp.TkDq1AnHnG /tmp/tmp.JzCCSiOAmJ +++ return 0 ++ local ip=34.70.86.31 ++ '[' -n 34.70.86.31 -a 34.70.86.31 '!=' null ']' ++ echo 34.70.86.31 ++ return + endpoint=34.70.86.31 + cat + local response ++ curl -s -k -XPOST -d @payload.json https://admin:admin@34.70.86.31/v0/qan/GetReport ++ jq '.rows[].fingerprint' + response='"TOTAL" "FIND version" "FIND system.version _id" "FIND oplog.rs"' + rm -f payload.json + [[ "TOTAL" "FIND version" "FIND system.version _id" "FIND oplog.rs" == \n\u\l\l ]] + get_qan_values mongodb dev-mongos admin:admin + local service_type=mongodb + local environment=dev-mongos + local user_pass=admin:admin + local start + local end + local endpoint ++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z + start=2024-07-09T23:56:25+00:00 ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z + end=2024-07-10T11:56:25+00:00 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UcO5bxR5jo ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ jq '.status.loadBalancer.ingress[].hostname' +++ local LAST_ERR=/tmp/tmp.DgbNaY5UXC +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.UcO5bxR5jo +++ cat /tmp/tmp.DgbNaY5UXC +++ rm /tmp/tmp.UcO5bxR5jo /tmp/tmp.DgbNaY5UXC +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.7d98e8uwFB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Yx4bvxB0Kt +++ local exit_status=0 +++ local timeout=4 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 -a -n 1 ']' +++ break +++ cat /tmp/tmp.7d98e8uwFB +++ cat /tmp/tmp.Yx4bvxB0Kt
+++ rm /tmp/tmp.7d98e8uwFB /tmp/tmp.Yx4bvxB0Kt +++ return 0 ++ local ip=34.70.86.31 ++ '[' -n 34.70.86.31 -a 34.70.86.31 '!=' null ']' ++ echo 34.70.86.31 ++ return + endpoint=34.70.86.31 + cat + local response ++ curl -s -k -XPOST -d @payload.json https://admin:admin@34.70.86.31/v0/qan/GetReport ++ jq '.rows[].fingerprint' + response=null + rm -f payload.json + [[ null == \n\u\l\l ]] + echo 'No data for mongodb service type in QAN' No data for mongodb service type in QAN + exit 1
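This is where the run fails: QAN returned fingerprints for the dev-mongod environment ("TOTAL", "FIND version", "FIND system.version _id", "FIND oplog.rs"), but the same query for dev-mongos came back null, so the helper reports that there is no data for the mongodb service type and the test exits 1. The check itself is a POST to PMM's QAN API; a sketch follows, with the payload shape inferred from the helper's start/end/environment locals (treat the exact JSON fields as an assumption, since the real payload.json is generated by a heredoc that xtrace does not show):

    get_qan_values() {
        local service_type="$1" environment="$2" user_pass="$3"
        local start end endpoint response
        start=$(/usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z)
        end=$(/usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z)
        endpoint=$(kubectl get service/monitoring-service \
            -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
        # assumed payload: a 12-hour window filtered by the environment label
        printf '{"period_start_from":"%s","period_start_to":"%s","group_by":"queryid","labels":[{"key":"environment","value":["%s"]}]}' \
            "$start" "$end" "$environment" > payload.json
        response=$(curl -s -k -XPOST -d @payload.json \
            "https://${user_pass}@${endpoint}/v0/qan/GetReport" | jq '.rows[].fingerprint')
        rm -f payload.json
        if [[ $response == null ]]; then
            echo "No data for $service_type service type in QAN"
            return 1
        fi
        echo "$response"
    }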