++ echo 'Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/logs/monitoring-2-0.log' Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/logs/monitoring-2-0.log ++ '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/cloud-secret.yml ']' ++ SKIP_BACKUPS_TO_AWS_GCP= ++ oc get projects error: No Auth Provider found for name "gcp" +++ kubectl version -o json +++ jq -r .serverVersion.gitVersion +++ grep '\-eks\-' ++ '[' ']' ++ EKS=0 +++ kubectl version -o json +++ jq -r '.serverVersion.major + "." + .serverVersion.minor' +++ /usr/bin/sed -r 's/[^0-9.]+//g' ++ KUBE_VERSION=1.20 +++ helm version -c +++ /usr/bin/sed -re 's/.*SemVer:"([^"]+)".*/\1/; s/.*\bVersion:"([^"]+)".*/\1/' ++ HELM_VERSION=v3.8.1 ++ '[' v3 == v2 ']' + cluster=monitoring + create_infra monitoring-2-0-4736 + local ns=monitoring-2-0-4736 + '[' -n pxc-operator ']' + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get pxc --all-namespaces -o wide No resources found + kubectl patch pxc -n sh --type=merge -p '{"metadata":{"finalizers":[]}}' error: resource(s) were provided, but no name, label selector, or --all flag specified + : + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.2UbQxDH2CZ ++ mktemp + local LAST_ERR=/tmp/tmp.TKf3uQuPWB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.2UbQxDH2CZ No resources found + cat /tmp/tmp.TKf3uQuPWB + rm /tmp/tmp.2UbQxDH2CZ /tmp/tmp.TKf3uQuPWB + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.M6u30tP3BD ++ mktemp + local LAST_ERR=/tmp/tmp.wz5dl8W9b7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.M6u30tP3BD No resources found + cat /tmp/tmp.wz5dl8W9b7 + rm /tmp/tmp.M6u30tP3BD /tmp/tmp.wz5dl8W9b7 + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.yrhpGNFDpu ++ mktemp + local LAST_ERR=/tmp/tmp.e02YsZxrDO + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.yrhpGNFDpu No resources found + cat /tmp/tmp.e02YsZxrDO + rm /tmp/tmp.yrhpGNFDpu /tmp/tmp.e02YsZxrDO + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + kubectl_bin get ns + '[' '!' 
-z '' ']' + kubectl_bin delete namespace pxc-operator ++ mktemp + awk '{print$1}' + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^NAME' + local LAST_OUT=/tmp/tmp.l7Bf9dd2xT + xargs kubectl delete ns ++ mktemp + local LAST_OUT=/tmp/tmp.aCMhNeJ4oG ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.d9QycQQVFY + local exit_status=0 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.SGPPhwMnNh + local exit_status=0 + for i in '$(seq 0 2)' + kubectl delete namespace pxc-operator ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get ns + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.l7Bf9dd2xT + cat /tmp/tmp.SGPPhwMnNh + rm /tmp/tmp.l7Bf9dd2xT /tmp/tmp.SGPPhwMnNh + return 0 error: resource(s) were provided, but no name, label selector, or --all flag specified + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.aCMhNeJ4oG namespace "pxc-operator" deleted + cat /tmp/tmp.d9QycQQVFY + rm /tmp/tmp.aCMhNeJ4oG /tmp/tmp.d9QycQQVFY + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + set +o xtrace namespace/pxc-operator - Error from server (NotFound): namespaces "pxc-operator" not found + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.gX60wwgukB ++ mktemp + local LAST_ERR=/tmp/tmp.FHuTmXg2zz + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl create namespace pxc-operator + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.gX60wwgukB namespace/pxc-operator created + cat /tmp/tmp.FHuTmXg2zz + rm /tmp/tmp.gX60wwgukB /tmp/tmp.FHuTmXg2zz + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.1wxBeiuYYH +++ mktemp ++ local LAST_ERR=/tmp/tmp.Tkl6PozMvR ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.1wxBeiuYYH ++ cat /tmp/tmp.Tkl6PozMvR ++ rm /tmp/tmp.1wxBeiuYYH /tmp/tmp.Tkl6PozMvR ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-basic --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.ZazHS8626X ++ mktemp + local LAST_ERR=/tmp/tmp.kQrs10STuV + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-basic --namespace=pxc-operator + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.ZazHS8626X Context "gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-basic" modified. 
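
Every kubectl_bin call traced above follows the same retry pattern: stdout and stderr are captured into mktemp files, the underlying kubectl command is attempted up to three times, both captures are printed, and the temp files are removed. A minimal sketch of that wrapper, reconstructed from this trace alone rather than taken from the repo's e2e-tests helpers, looks roughly like this:

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do
            # run plain kubectl, keeping stdout and stderr for later inspection
            kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            if [[ $exit_status != 0 ]]; then
                sleep 0    # the trace shows 'sleep 0' here; the real helper may compute a backoff
                continue
            fi
            break
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm "$LAST_OUT" "$LAST_ERR"
        return $exit_status
    }

This is why failing commands (for example the monitoring-2-0-4736 namespace delete further down) show three attempts before kubectl_bin returns 1 and the caller falls through to ':' to ignore the error.
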
+ cat /tmp/tmp.kQrs10STuV + rm /tmp/tmp.ZazHS8626X /tmp/tmp.kQrs10STuV + return 0 + deploy_operator + desc 'start operator' + set +o xtrace ----------------------------------------------------------------------------------- start operator ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.2KmkwrHtuX ++ mktemp + local LAST_ERR=/tmp/tmp.sDLcttI32o + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/deploy/crd.yaml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.2KmkwrHtuX customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com unchanged customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com unchanged customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com unchanged customresourcedefinition.apiextensions.k8s.io/perconaxtradbbackups.pxc.percona.com configured + cat /tmp/tmp.sDLcttI32o + rm /tmp/tmp.2KmkwrHtuX /tmp/tmp.sDLcttI32o + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/deploy/cw-rbac.yaml + kubectl_bin apply -f - + sed -e 's^namespace: .*^namespace: pxc-operator^' ++ mktemp + local LAST_OUT=/tmp/tmp.X13MeBrLmu ++ mktemp + local LAST_ERR=/tmp/tmp.HDmh6LWTns + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.X13MeBrLmu clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.HDmh6LWTns + rm /tmp/tmp.X13MeBrLmu /tmp/tmp.HDmh6LWTns + return 0 + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/deploy/cw-operator.yaml + kubectl_bin apply -f - + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a^' ++ mktemp + local LAST_OUT=/tmp/tmp.sDsiWQTRK0 ++ mktemp + local LAST_ERR=/tmp/tmp.m5qTTsUWxk + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.sDsiWQTRK0 deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.m5qTTsUWxk + rm /tmp/tmp.sDsiWQTRK0 /tmp/tmp.m5qTTsUWxk + return 0 + sleep 10 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ grep -c percona-xtradb-cluster-operator +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.qhn2rxhauV +++ mktemp ++ local LAST_ERR=/tmp/tmp.SHUPF3fEAQ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.qhn2rxhauV ++ cat /tmp/tmp.SHUPF3fEAQ ++ rm /tmp/tmp.qhn2rxhauV /tmp/tmp.SHUPF3fEAQ ++ return 0 + wait_pod 
percona-xtradb-cluster-operator-5699d7755d-6b2g7 480 pxc-operator + local pod=percona-xtradb-cluster-operator-5699d7755d-6b2g7 + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-5699d7755d-6b2g7 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace percona-xtradb-cluster-operator-5699d7755d-6b2g7.Ok + sleep 3 + create_namespace monitoring-2-0-4736 + local namespace=monitoring-2-0-4736 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + kubectl_bin get ns + '[' '!' -z '' ']' + kubectl_bin delete namespace monitoring-2-0-4736 + xargs kubectl delete ns + awk '{print$1}' ++ mktemp + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^NAME' + local LAST_OUT=/tmp/tmp.6nHpwHGDTF ++ mktemp + local LAST_OUT=/tmp/tmp.ELiPjqK0Lc ++ mktemp + local LAST_ERR=/tmp/tmp.QMw8sZ7fmT + local exit_status=0 ++ seq 0 2 ++ mktemp + for i in '$(seq 0 2)' + kubectl delete namespace monitoring-2-0-4736 + local LAST_ERR=/tmp/tmp.7Fyb0qMuQw + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get ns + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.ELiPjqK0Lc + cat /tmp/tmp.7Fyb0qMuQw + rm /tmp/tmp.ELiPjqK0Lc /tmp/tmp.7Fyb0qMuQw + return 0 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl delete namespace monitoring-2-0-4736 error: resource(s) were provided, but no name, label selector, or --all flag specified + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl delete namespace monitoring-2-0-4736 + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + cat /tmp/tmp.6nHpwHGDTF + cat /tmp/tmp.QMw8sZ7fmT Error from server (NotFound): namespaces "monitoring-2-0-4736" not found + rm /tmp/tmp.6nHpwHGDTF /tmp/tmp.QMw8sZ7fmT + return 1 + : + wait_for_delete namespace/monitoring-2-0-4736 + local res=namespace/monitoring-2-0-4736 + set +o xtrace namespace/monitoring-2-0-4736 - Error from server (NotFound): namespaces "monitoring-2-0-4736" not found + kubectl_bin create namespace monitoring-2-0-4736 ++ mktemp + local LAST_OUT=/tmp/tmp.v0RCNoXi1T ++ mktemp + local LAST_ERR=/tmp/tmp.tuyyuhBPHK + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl create namespace monitoring-2-0-4736 + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.v0RCNoXi1T namespace/monitoring-2-0-4736 created + cat /tmp/tmp.tuyyuhBPHK + rm /tmp/tmp.v0RCNoXi1T /tmp/tmp.tuyyuhBPHK + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.yZfHqdizDu +++ mktemp ++ local LAST_ERR=/tmp/tmp.Bb5fep1NFh ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl config current-context ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.yZfHqdizDu ++ cat /tmp/tmp.Bb5fep1NFh ++ rm /tmp/tmp.yZfHqdizDu /tmp/tmp.Bb5fep1NFh ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-basic --namespace=monitoring-2-0-4736 ++ mktemp + local LAST_OUT=/tmp/tmp.inR0UDMnWW ++ mktemp + local LAST_ERR=/tmp/tmp.vu6C3qgfTL + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-basic --namespace=monitoring-2-0-4736 + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.inR0UDMnWW Context "gke_cloud-dev-112233_us-central1-a_jenkins-pxc-706f792a-basic" modified. 
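
For orientation, the create_namespace helper (traced once for pxc-operator and once for monitoring-2-0-4736) performs the same steps both times: best-effort cleanup of leftover non-system namespaces, deletion of the target namespace if it still exists, a wait for that deletion, and re-creation plus a kubectl context switch into it. A rough sketch inferred from this trace follows; the interleaved output suggests the cleanup pipeline runs in the background, and the exact guards are assumptions:

    create_namespace() {
        local namespace=$1
        local skip_clean_namespace=$2

        if [[ -z $skip_clean_namespace ]]; then
            # clean up leftover test namespaces, keeping system/operator/openshift ones
            kubectl_bin get ns \
                | egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^NAME' \
                | awk '{print$1}' \
                | xargs kubectl delete ns &
        fi

        # drop the target namespace if present; ignore "not found" errors
        kubectl_bin delete namespace "$namespace" || :
        wait_for_delete "namespace/$namespace"
        kubectl_bin create namespace "$namespace"
        kubectl_bin config set-context "$(kubectl_bin config current-context)" --namespace="$namespace"
    }

The 'error: resource(s) were provided, but no name, label selector, or --all flag specified' lines interleaved above evidently come from that background xargs cleanup running with an empty namespace list, not from the namespace delete/create commands themselves.
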
+ cat /tmp/tmp.vu6C3qgfTL + rm /tmp/tmp.inR0UDMnWW /tmp/tmp.vu6C3qgfTL + return 0 + apply_secrets + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.AsJgXBsWWj ++ mktemp + local LAST_ERR=/tmp/tmp.4D07nMxYrK + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/cloud-secret.yml + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.AsJgXBsWWj secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.4D07nMxYrK + rm /tmp/tmp.AsJgXBsWWj /tmp/tmp.4D07nMxYrK + return 0 + deploy_helm monitoring-2-0-4736 + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" already exists with the same configuration, skipping + helm repo add percona https://percona-charts.storage.googleapis.com/ "percona" already exists with the same configuration, skipping + helm repo add minio https://helm.min.io/ "minio" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "percona" chart repository ...Successfully got an update from the "hashicorp" chart repository ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "chaos-mesh" chart repository Update Complete. ⎈Happy Helming!⎈ + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + platform=kubernetes + helm uninstall monitoring Error: uninstall: Release not loaded: monitoring: release: not found + : + '[' '!' 
-z '' ']' + helm install monitoring --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server --set platform=kubernetes https://percona-charts.storage.googleapis.com/pmm-server-2.26.1.tgz NAME: monitoring LAST DEPLOYED: Tue Apr 12 11:54:15 2022 NAMESPACE: monitoring-2-0-4736 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster: endpoint: https://monitoring-service.monitoring-2-0-4736.svc.cluster.local:443 login: admin password: admin + SERVICE=postgres + kubectl_bin exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.ItPNUoSu18 ++ mktemp + local LAST_ERR=/tmp/tmp.w6zPzFzFM4 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + cat /tmp/tmp.ItPNUoSu18 + cat /tmp/tmp.w6zPzFzFM4 error: unable to upgrade connection: container not found ("monitoring") + rm /tmp/tmp.ItPNUoSu18 /tmp/tmp.w6zPzFzFM4 + return 1 + echo 'Retry 0' Retry 0 + sleep 5 + let retry+=1 + '[' 1 -ge 20 ']' + kubectl_bin exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.uxTLiTVXnu ++ mktemp + local LAST_ERR=/tmp/tmp.bus6zVAU6X + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + cat /tmp/tmp.uxTLiTVXnu + cat /tmp/tmp.bus6zVAU6X error: unable to upgrade connection: container not found ("monitoring") + rm /tmp/tmp.uxTLiTVXnu /tmp/tmp.bus6zVAU6X + return 1 + echo 'Retry 1' Retry 1 + sleep 5 + let retry+=1 + '[' 2 -ge 20 ']' + kubectl_bin exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.A0lZkVUiSv ++ mktemp + local LAST_ERR=/tmp/tmp.hwyJWWsVsH + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + cat /tmp/tmp.A0lZkVUiSv + cat /tmp/tmp.hwyJWWsVsH error: unable to upgrade connection: container not found ("monitoring") + rm /tmp/tmp.A0lZkVUiSv /tmp/tmp.hwyJWWsVsH + return 1 + echo 'Retry 2' Retry 2 + sleep 5 + let retry+=1 + '[' 3 -ge 20 ']' + kubectl_bin exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.toOoD0VDa5 ++ mktemp + local LAST_ERR=/tmp/tmp.ieq3fdFimd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl exec monitoring-0 -- bash -c 'pgrep -x postgres 
>/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + cat /tmp/tmp.toOoD0VDa5 + cat /tmp/tmp.ieq3fdFimd error: unable to upgrade connection: container not found ("monitoring") + rm /tmp/tmp.toOoD0VDa5 /tmp/tmp.ieq3fdFimd + return 1 + echo 'Retry 3' Retry 3 + sleep 5 + let retry+=1 + '[' 4 -ge 20 ']' + kubectl_bin exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.Y4VMNTQF8r ++ mktemp + local LAST_ERR=/tmp/tmp.XrZrEAdX6d + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + cat /tmp/tmp.Y4VMNTQF8r + cat /tmp/tmp.XrZrEAdX6d error: unable to upgrade connection: container not found ("monitoring") + rm /tmp/tmp.Y4VMNTQF8r /tmp/tmp.XrZrEAdX6d + return 1 + echo 'Retry 4' Retry 4 + sleep 5 + let retry+=1 + '[' 5 -ge 20 ']' + kubectl_bin exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.nqzX19kdZE ++ mktemp + local LAST_ERR=/tmp/tmp.cTLqHJsQPp + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + for i in '$(seq 0 2)' + kubectl exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' + exit_status=1 + [[ 1 != 0 ]] + sleep 0 + cat /tmp/tmp.nqzX19kdZE + cat /tmp/tmp.cTLqHJsQPp error: unable to upgrade connection: container not found ("monitoring") + rm /tmp/tmp.nqzX19kdZE /tmp/tmp.cTLqHJsQPp + return 1 + echo 'Retry 5' Retry 5 + sleep 5 + let retry+=1 + '[' 6 -ge 20 ']' + kubectl_bin exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.GfcUDFegCX ++ mktemp + local LAST_ERR=/tmp/tmp.2s9JAtkBEG + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl exec monitoring-0 -- bash -c 'pgrep -x postgres >/dev/null' + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.GfcUDFegCX + cat /tmp/tmp.2s9JAtkBEG + rm /tmp/tmp.GfcUDFegCX /tmp/tmp.2s9JAtkBEG + return 0 ++ kubectl_bin exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2' +++ mktemp ++ local LAST_OUT=/tmp/tmp.syIOgMaTht +++ mktemp ++ local LAST_ERR=/tmp/tmp.VpH8EUPLUz ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.syIOgMaTht ++ cat /tmp/tmp.VpH8EUPLUz ++ rm /tmp/tmp.syIOgMaTht /tmp/tmp.VpH8EUPLUz ++ return 0 + ADMIN_PASSWORD=admin + kubectl_bin exec monitoring-0 -- bash -c 'grafana-cli --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini admin reset-admin-password admin' ++ mktemp + local LAST_OUT=/tmp/tmp.GnPAb4Fp7d ++ mktemp + local LAST_ERR=/tmp/tmp.IQpVwRlgFv + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl exec monitoring-0 -- bash -c 'grafana-cli --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini admin 
reset-admin-password admin' + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.GnPAb4Fp7d t=2022-04-12T11:55:13+0000 lvl=info msg="The state of unified alerting is still not defined. The decision will be made during as we run the database migrations" logger=settings t=2022-04-12T11:55:13+0000 lvl=warn msg="falling back to legacy setting of 'min_interval_seconds'; please use the configuration option in the `unified_alerting` section if Grafana 8 alerts are enabled." logger=settings t=2022-04-12T11:55:13+0000 lvl=info msg="Config loaded from" logger=settings file=/usr/share/grafana/conf/defaults.ini t=2022-04-12T11:55:13+0000 lvl=info msg="Config loaded from" logger=settings file=/etc/grafana/grafana.ini t=2022-04-12T11:55:13+0000 lvl=info msg="Path Home" logger=settings path=/usr/share/grafana t=2022-04-12T11:55:13+0000 lvl=info msg="Path Data" logger=settings path=/usr/share/grafana/data t=2022-04-12T11:55:13+0000 lvl=info msg="Path Logs" logger=settings path=/usr/share/grafana/data/log t=2022-04-12T11:55:13+0000 lvl=info msg="Path Plugins" logger=settings path=/srv/grafana/plugins t=2022-04-12T11:55:13+0000 lvl=info msg="Path Provisioning" logger=settings path=/usr/share/grafana/conf/provisioning t=2022-04-12T11:55:13+0000 lvl=info msg="App mode production" logger=settings t=2022-04-12T11:55:13+0000 lvl=info msg="Connecting to DB" logger=sqlstore dbtype=sqlite3 t=2022-04-12T11:55:13+0000 lvl=info msg="Creating SQLite database file" logger=sqlstore path=/usr/share/grafana/data/grafana.db t=2022-04-12T11:55:13+0000 lvl=info msg="Starting DB migrations" logger=migrator t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create migration_log table" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create user table" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user.login" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user.email" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_user_login - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_user_email - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table user to user_v1 - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create user table v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_user_login - v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_user_email - v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="copy data_source v1 to v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table user_v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add column help_flags1 to user table" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Update user table charset" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add last_seen_at column to user" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add missing user data" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add is_disabled column to user" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" 
logger=migrator id="Add index user.login/user.email" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add is_service_account column to user" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create temp user table v1-7" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_email - v1-7" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_org_id - v1-7" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_code - v1-7" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_status - v1-7" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Update temp_user table charset" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_email - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_org_id - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_code - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_status - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table temp_user to temp_user_tmp_qwerty - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create temp_user v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_email - v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_org_id - v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_code - v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_status - v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="copy temp_user v1 to v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="drop temp_user_tmp_qwerty" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Set created for temp users that will otherwise prematurely expire" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create star table" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index star.user_id_dashboard_id" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create org table v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_org_name - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create org_user table v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_org_user_org_id - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_org_user_org_id_user_id - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_org_user_user_id - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Update org table charset" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" 
logger=migrator id="Update org_user table charset" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Migrate all Read Only Viewers to Viewers" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard table" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard.account_id" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_account_id_slug" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_tag table" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_tag.dasboard_id_term" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_dashboard_tag_dashboard_id_term - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table dashboard to dashboard_v1 - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_org_id - v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_dashboard_org_id_slug - v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="copy dashboard v1 to v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="drop table dashboard_v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="alter dashboard.data to mediumtext v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add column updated_by in dashboard - v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add column created_by in dashboard - v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add column gnetId in dashboard" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for gnetId in dashboard" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add column plugin_id in dashboard" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for plugin_id in dashboard" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for dashboard_id in dashboard_tag" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard table charset" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard_tag table charset" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add column folder_id in dashboard" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add column isFolder in dashboard" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add column has_acl in dashboard" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add column uid in dashboard" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Update uid column values in dashboard" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index dashboard_org_id_uid" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" 
logger=migrator id="Remove unique index org_id_slug" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard title length" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index for dashboard_org_id_title_folder_id" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_provisioning" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table dashboard_provisioning to dashboard_provisioning_tmp_qwerty - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_provisioning v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_provisioning_dashboard_id - v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_provisioning_dashboard_id_name - v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="copy dashboard_provisioning v1 to v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="drop dashboard_provisioning_tmp_qwerty" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add check_sum column" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for dashboard_title" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="delete tags for deleted dashboards" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="delete stars for deleted dashboards" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for dashboard_is_folder" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create data_source table" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="add index data_source.account_id" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index data_source.account_id_name" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_data_source_account_id - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_data_source_account_id_name - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table data_source to data_source_v1 - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create data_source table v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_data_source_org_id - v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_data_source_org_id_name - v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="copy data_source v1 to v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table data_source_v1 #2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add column with_credentials" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add secure json data column" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Update data_source table charset" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Update 
initial version to 1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add read_only data column" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Migrate logging ds to loki ds" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Update json_data with nulls" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add uid column" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Update uid value" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index datasource_org_id_uid" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index datasource_org_id_is_default" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create api_key table" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="add index api_key.account_id" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="add index api_key.key" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="add index api_key.account_id_name" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_api_key_account_id - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_api_key_key - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_api_key_account_id_name - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table api_key to api_key_v1 - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create api_key table v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_api_key_org_id - v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_api_key_key - v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_api_key_org_id_name - v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="copy api_key v1 to v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table api_key_v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Update api_key table charset" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add expires to api_key table" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add service account foreign key" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_snapshot table v4" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="drop table dashboard_snapshot_v4 #1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_snapshot table v5 #2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_dashboard_snapshot_key - v5" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_dashboard_snapshot_delete_key - v5" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_snapshot_user_id - v5" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing 
migration" logger=migrator id="alter dashboard_snapshot to mediumtext v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard_snapshot table charset" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add column external_delete_url to dashboard_snapshots table" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add encrypted dashboard json column" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Change dashboard_encrypted column to MEDIUMBLOB" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create quota table v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_quota_org_id_user_id_target - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Update quota table charset" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create plugin_setting table" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_plugin_setting_org_id_plugin_id - v1" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Add column plugin_version to plugin_settings" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Update plugin_setting table charset" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create session table" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table playlist table" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table playlist_item table" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create playlist table v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create playlist item table v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Update playlist table charset" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Update playlist_item table charset" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="drop preferences table v2" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="drop preferences table v3" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="create preferences table v3" t=2022-04-12T11:55:13+0000 lvl=info msg="Executing migration" logger=migrator id="Update preferences table charset" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add column team_id in preferences" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Update team_id column values in preferences" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add column week_start in preferences" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create alert table v1" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert org_id & id " t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert state" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert dashboard_id" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Create 
alert_rule_tag table v1" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index alert_rule_tag.alert_id_tag_id" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_alert_rule_tag_alert_id_tag_id - v1" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table alert_rule_tag to alert_rule_tag_v1 - v1" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Create alert_rule_tag table v2" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_alert_rule_tag_alert_id_tag_id - Add unique index alert_rule_tag.alert_id_tag_id V2" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="copy alert_rule_tag v1 to v2" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="drop table alert_rule_tag_v1" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_notification table v1" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add column is_default" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add column frequency" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add column send_reminder" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add column disable_resolve_message" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert_notification org_id & name" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Update alert table charset" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Update alert_notification table charset" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create notification_journal table v1" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index notification_journal org_id & alert_id & notifier_id" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="drop alert_notification_journal" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_notification_state table v1" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert_notification_state org_id & alert_id & notifier_id" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add for to alert table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add column uid in alert_notification" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Update uid column values in alert_notification" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index alert_notification_org_id_uid" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Remove unique index org_id_name" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add column secure_settings in alert_notification" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert.settings to mediumtext" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add non-unique index alert_notification_state_alert_id" t=2022-04-12T11:55:14+0000 lvl=info 
msg="Executing migration" logger=migrator id="Add non-unique index alert_rule_tag_alert_id" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old annotation table v4" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create annotation table v5" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 0 v3" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 1 v3" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 2 v3" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 3 v3" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 4 v3" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Update annotation table charset" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add column region_id to annotation table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Drop category_id index" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add column tags to annotation table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Create annotation_tag table v2" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index annotation_tag.annotation_id_tag_id" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_annotation_tag_annotation_id_tag_id - v2" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table annotation_tag to annotation_tag_v2 - v2" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Create annotation_tag table v3" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_annotation_tag_annotation_id_tag_id - Add unique index annotation_tag.annotation_id_tag_id V3" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="copy annotation_tag v2 to v3" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="drop table annotation_tag_v2" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Update alert annotations and set TEXT to empty" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add created time to annotation table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add updated time to annotation table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for created in annotation table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for updated in annotation table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Convert existing annotations from seconds to milliseconds" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add epoch_end column" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for epoch_end" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Make epoch_end the same as epoch" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator 
id="Move region to single row" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Remove index org_id_epoch from annotation table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Remove index org_id_dashboard_id_panel_id_epoch from annotation table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for org_id_dashboard_id_epoch_end_epoch on annotation table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for org_id_epoch_end_epoch on annotation table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Remove index org_id_epoch_epoch_end from annotation table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for alert_id on annotation table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create test_data table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_version table v1" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_version.dashboard_id" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_version.dashboard_id and dashboard_version.version" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Set dashboard version to 1 where 0" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="save existing dashboard data in dashboard_version table v1" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="alter dashboard_version.data to mediumtext v1" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create team table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index team.org_id" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index team_org_id_name" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create team member table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_member.org_id" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index team_member_org_id_team_id_user_id" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_member.team_id" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add column email to team table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add column external to team_member table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add column permission to team_member table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard acl table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_dashboard_id" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_acl_dashboard_id_user_id" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_acl_dashboard_id_team_id" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_user_id" 
t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_team_id" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_org_id_role" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_permission" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="save default acl rules in dashboard_acl table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="delete acl rules for deleted dashboards and folders" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create tag table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index tag.key_value" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create login attempt table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index login_attempt.username" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_login_attempt_username - v1" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table login_attempt to login_attempt_tmp_qwerty - v1" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create login_attempt v2" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_login_attempt_username - v2" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="copy login_attempt v1 to v2" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="drop login_attempt_tmp_qwerty" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create user auth table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_user_auth_auth_module_auth_id - v1" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="alter user_auth.auth_id to length 190" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth access token to user_auth" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth refresh token to user_auth" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth token type to user_auth" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth expiry to user_auth" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add index to user_id column in user_auth" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create server_lock table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index server_lock.operation_uid" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create user auth token table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user_auth_token.auth_token" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user_auth_token.prev_auth_token" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index user_auth_token.user_id" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add revoked_at to the user 
auth token" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create cache_data table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index cache_data.cache_key" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create short_url table v1" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index short_url.org_id-uid" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="delete alert_definition table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="recreate alert_definition table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition on org_id and title columns" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition on org_id and uid columns" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_definition table data column to mediumtext in mysql" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="drop index in alert_definition on org_id and title columns" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="drop index in alert_definition on org_id and uid columns" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index in alert_definition on org_id and title columns" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index in alert_definition on org_id and uid columns" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="Add column paused in alert_definition" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="drop alert_definition table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="delete alert_definition_version table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="recreate alert_definition_version table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition_version table on alert_definition_id and version columns" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition_version table on alert_definition_uid and version columns" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_definition_version table data column to mediumtext in mysql" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="drop alert_definition_version table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_instance table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_instance table on def_org_id, def_uid and current_state columns" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_instance table on def_org_id, current_state columns" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add column current_state_end to alert_instance" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="remove index def_org_id, def_uid, current_state on alert_instance" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing 
migration" logger=migrator id="remove index def_org_id, current_state on alert_instance" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="rename def_org_id to rule_org_id in alert_instance" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="rename def_uid to rule_uid in alert_instance" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index rule_org_id, rule_uid, current_state on alert_instance" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index rule_org_id, current_state on alert_instance" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_rule table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id and title columns" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id and uid columns" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id, namespace_uid, group_uid columns" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_rule table data column to mediumtext in mysql" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add column for to alert_rule" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add column annotations to alert_rule" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add column labels to alert_rule" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="remove unique index from alert_rule on org_id, title columns" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id, namespase_uid and title columns" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add dashboard_uid column to alert_rule" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add panel_id column to alert_rule" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id, dashboard_uid and panel_id columns" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_rule_version table" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule_version table on rule_org_id, rule_uid and version columns" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule_version table on rule_org_id, rule_namespace_uid and rule_group columns" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_rule_version table data column to mediumtext in mysql" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add column for to alert_rule_version" t=2022-04-12T11:55:14+0000 lvl=info msg="Executing migration" logger=migrator id="add column annotations to alert_rule_version" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add column labels to alert_rule_version" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id=create_alert_configuration_table t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="Add column default in 
alert_configuration" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="alert alert_configuration alertmanager_configuration column from TEXT to MEDIUMTEXT if mysql" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add column org_id in alert_configuration" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_configuration table on org_id column" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id=create_ngalert_configuration_table t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add index in ngalert_configuration on org_id column" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="clear migration entry \"remove unified alerting data\"" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="move dashboard alerts to unified alerting" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="create library_element table v1" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add index library_element org_id-folder_id-name-kind" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="create library_element_connection table v1" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add index library_element_connection element_id-kind-connection_id" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index library_element org_id_uid" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="clone move dashboard alerts to unified alerting" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="create data_keys table" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="create kv_store table v1" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add index kv_store.org_id-namespace-key" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="update dashboard_uid and panel_id from existing annotations" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="create permission table" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index permission.role_id" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index role_id_action_scope" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="create role table" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add column display_name" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add column group_name" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add index role.org_id" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index role_org_id_name" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add index role_org_id_uid" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="create team role table" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_role.org_id" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index 
team_role_org_id_team_id_role_id" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_role.team_id" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="create user role table" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add index user_role.org_id" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user_role_org_id_user_id_role_id" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add index user_role.user_id" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="create builtin role table" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add index builtin_role.role_id" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add index builtin_role.name" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="Add column org_id to builtin_role table" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add index builtin_role.org_id" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index builtin_role_org_id_role_id_role" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="Remove unique index role_org_id_uid" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index role.uid" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="create seed assignment table" t=2022-04-12T11:55:15+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index builtin_role_role_name" t=2022-04-12T11:55:15+0000 lvl=info msg="migrations completed" logger=migrator performed=381 skipped=0 duration=2.106873181s t=2022-04-12T11:55:15+0000 lvl=info msg="Created default admin" logger=sqlstore user=admin t=2022-04-12T11:55:15+0000 lvl=info msg="Created default organization" logger=sqlstore Admin password changed successfully ✔ + cat /tmp/tmp.IQpVwRlgFv + rm /tmp/tmp.GnPAb4Fp7d /tmp/tmp.IQpVwRlgFv + return 0 + desc 'create PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PXC cluster ----------------------------------------------------------------------------------- + spinup_pxc monitoring /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/conf/monitoring.yml 3 120 + local cluster=monitoring + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/conf/monitoring.yml + local size=3 + local sleep=120 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/secrets.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/client.yml + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.IPJLXB4rsN ++ mktemp + local LAST_ERR=/tmp/tmp.IUoO8Cs2MR + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/secrets.yml + exit_status=0 + [[ 0 != 0 ]] + break + cat 
/tmp/tmp.IPJLXB4rsN secret/my-cluster-secrets created secret/some-name-ssl created secret/some-name-ssl-internal created + cat /tmp/tmp.IUoO8Cs2MR + rm /tmp/tmp.IPJLXB4rsN /tmp/tmp.IUoO8Cs2MR + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/client.yml + kubectl_bin apply -f - ++ mktemp + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1-11-0#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + local LAST_OUT=/tmp/tmp.m0gbmPQgud + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' ++ mktemp + /usr/bin/sed -e s~minio-service.#namespace~minio-service.monitoring-2-0-4736~ + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + local LAST_ERR=/tmp/tmp.HpirHg64af + local exit_status=0 + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a#' ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.m0gbmPQgud deployment.apps/pxc-client created + cat /tmp/tmp.HpirHg64af + rm /tmp/tmp.m0gbmPQgud /tmp/tmp.HpirHg64af + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/conf/monitoring.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/conf/monitoring.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/conf/monitoring.yml + /usr/bin/sed -e 's#initImage:.*-init$#initImage: perconalab/percona-xtradb-cluster-operator:PR-1125-706f792a#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.monitoring-2-0-4736~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + local LAST_OUT=/tmp/tmp.xrNZd3YJ3F + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1-11-0#' ++ mktemp + local LAST_ERR=/tmp/tmp.cExVGEwEkf 
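The apply_config step above streams each manifest through a chain of sed substitutions that pin the API version and image tags before piping the result into kubectl apply. A minimal sketch of that pattern, using only substitutions that appear verbatim in this trace; the manifest path is a placeholder and only a subset of the sed expressions is repeated here.

MANIFEST=conf/client.yml  # placeholder; the harness passes the full Jenkins workspace path
cat "$MANIFEST" \
  | /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1-11-0#' \
  | /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' \
  | /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' \
  | kubectl apply -f -

The same pipeline is reused for secrets.yml, client.yml and monitoring.yml; only the input manifest changes.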
+ local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl apply -f - + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.xrNZd3YJ3F perconaxtradbcluster.pxc.percona.com/monitoring created + cat /tmp/tmp.cExVGEwEkf + rm /tmp/tmp.xrNZd3YJ3F /tmp/tmp.cExVGEwEkf + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy monitoring ++ local target_cluster=monitoring +++ kubectl_bin get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.BCAqfOfySQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GDeRHFOgvb +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ kubectl get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ cat /tmp/tmp.BCAqfOfySQ +++ cat /tmp/tmp.GDeRHFOgvb +++ rm /tmp/tmp.BCAqfOfySQ /tmp/tmp.GDeRHFOgvb +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo monitoring-haproxy ++ return + local proxy=monitoring-haproxy + wait_for_running monitoring-haproxy 1 + local name=monitoring-haproxy + let last_pod=0 + : + local max_retry=480 ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-haproxy-0 480 + local pod=monitoring-haproxy-0 + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ echo monitoring-haproxy-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace monitoring-haproxy-0.............................................error: a container name must be specified for pod monitoring-haproxy-0, choose one of: [pmm-client haproxy pxc-monit] .Ok + wait_for_running monitoring-pxc 3 + local name=monitoring-pxc + let last_pod=2 + local max_retry=480 ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-0 480 + local pod=monitoring-pxc-0 + local max_retry=480 + local ns= ++ echo monitoring-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace monitoring-pxc-0.Ok + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-1 480 + local pod=monitoring-pxc-1 + local max_retry=480 + local ns= ++ echo monitoring-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace monitoring-pxc-1.....................................Ok + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-2 480 + local pod=monitoring-pxc-2 + local max_retry=480 + local ns= ++ echo monitoring-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace monitoring-pxc-2..............................Ok + sleep 120 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h monitoring-haproxy -uroot -proot_password' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h monitoring-haproxy -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp 
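The run_mysql helper invoked here resolves the pxc-client pod by label and executes the SQL inside it. A condensed sketch of that flow, assuming the mysql client is available in the pxc-client container; the helper's exact invocation may differ, and the -sN flags are an illustrative choice rather than something taken from this trace.

# resolve the client pod by the same selector the trace shows
client_pod=$(kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}')
# run the statement through HAProxy, as in the 'write data' step above
kubectl exec "$client_pod" -- bash -c \
  "mysql -sN -h monitoring-haproxy -uroot -proot_password -e 'INSERT myApp.myApp (id) VALUES (100500)'"

The later SELECT checks follow the same pattern, only switching the -h target to monitoring-pxc-0, monitoring-pxc-1 and monitoring-pxc-2.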
++ local LAST_OUT=/tmp/tmp.gw48LH6Ams +++ mktemp ++ local LAST_ERR=/tmp/tmp.P3LotCY026 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.gw48LH6Ams ++ cat /tmp/tmp.P3LotCY026 ++ rm /tmp/tmp.gw48LH6Ams /tmp/tmp.P3LotCY026 ++ return 0 + client_pod=pxc-client-5d749ff8b6-rqlfc + wait_pod pxc-client-5d749ff8b6-rqlfc + local pod=pxc-client-5d749ff8b6-rqlfc + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-rqlfc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-5d749ff8b6-rqlfc.Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h monitoring-haproxy -uroot -proot_password' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h monitoring-haproxy -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.01qH4iqUCW +++ mktemp ++ local LAST_ERR=/tmp/tmp.YKKQ0G6InU ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.01qH4iqUCW ++ cat /tmp/tmp.YKKQ0G6InU ++ rm /tmp/tmp.01qH4iqUCW /tmp/tmp.YKKQ0G6InU ++ return 0 + client_pod=pxc-client-5d749ff8b6-rqlfc + wait_pod pxc-client-5d749ff8b6-rqlfc + local pod=pxc-client-5d749ff8b6-rqlfc + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-5d749ff8b6-rqlfc + local container= + set +o xtrace pxc-client-5d749ff8b6-rqlfc.Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-0.monitoring-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-0.monitoring-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-0.monitoring-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-0.monitoring-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8QvVdZSDfK +++ mktemp ++ local LAST_ERR=/tmp/tmp.7s66BOcIV1 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.8QvVdZSDfK ++ cat /tmp/tmp.7s66BOcIV1 ++ rm /tmp/tmp.8QvVdZSDfK /tmp/tmp.7s66BOcIV1 ++ return 0 + client_pod=pxc-client-5d749ff8b6-rqlfc + wait_pod pxc-client-5d749ff8b6-rqlfc + local pod=pxc-client-5d749ff8b6-rqlfc + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-rqlfc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-5d749ff8b6-rqlfc.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.vUYUhOUyDQ/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/compare/select-1.sql /tmp/tmp.vUYUhOUyDQ/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-1.monitoring-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-1.monitoring-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-1.monitoring-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-1.monitoring-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.IHzYGGkLvA +++ mktemp ++ local LAST_ERR=/tmp/tmp.A1Ckw3cNw0 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.IHzYGGkLvA ++ cat /tmp/tmp.A1Ckw3cNw0 ++ rm /tmp/tmp.IHzYGGkLvA /tmp/tmp.A1Ckw3cNw0 ++ return 0 + client_pod=pxc-client-5d749ff8b6-rqlfc + wait_pod pxc-client-5d749ff8b6-rqlfc + local pod=pxc-client-5d749ff8b6-rqlfc + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-rqlfc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-5d749ff8b6-rqlfc.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.vUYUhOUyDQ/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/compare/select-1.sql /tmp/tmp.vUYUhOUyDQ/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-2.monitoring-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-2.monitoring-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-2.monitoring-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-2.monitoring-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YQrOXIkQAe +++ mktemp ++ local LAST_ERR=/tmp/tmp.sHMIsu4lbJ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.YQrOXIkQAe ++ cat /tmp/tmp.sHMIsu4lbJ ++ rm /tmp/tmp.YQrOXIkQAe /tmp/tmp.sHMIsu4lbJ ++ return 0 + client_pod=pxc-client-5d749ff8b6-rqlfc + wait_pod pxc-client-5d749ff8b6-rqlfc + local pod=pxc-client-5d749ff8b6-rqlfc + local max_retry=480 + local ns= ++ echo pxc-client-5d749ff8b6-rqlfc ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pxc-client-5d749ff8b6-rqlfc.Ok + set +o xtrace + '[' '!' -s /tmp/tmp.vUYUhOUyDQ/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/compare/select-1.sql /tmp/tmp.vUYUhOUyDQ/select-1.sql ++ is_keyring_plugin_in_use monitoring ++ local cluster=monitoring ++ kubectl_bin exec -it monitoring-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ egrep -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.byXv34G0Q6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.gQd29DHyIP ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl exec -it monitoring-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.byXv34G0Q6 ++ cat /tmp/tmp.gQd29DHyIP Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.byXv34G0Q6 /tmp/tmp.gQd29DHyIP ++ return 0 + '[' '' ']' + desc 'check if pmm-client container enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-pxc + local resource=statefulset/monitoring-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc.yml + local new_result=/tmp/tmp.vUYUhOUyDQ/statefulset_monitoring-pxc.yml + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-80.yml ']' + version_gt 1.22 ++ bc -l ++ echo '1.20 >= 1.22' + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - metadata.selfLink + yq d - metadata.deletionTimestamp + yq d - metadata.managedFields + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - spec.nodeName + yq d - 'metadata.annotations."cloud.google.com/neg"' + yq d - '**."percona.com/*"' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**.creationTimestamp' + yq d - '**.finalizers' + yq d - '**.clusterIPs' + yq d - '**.healthCheckNodePort' + yq d - '**.dataSource' + yq d - '**.nodePort' + yq d - '**.imagePullSecrets' + yq d - '**.procMount' + yq d - '**.storageClassName' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - spec.volumeMode + yq d - '**.volumeName' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**.image' + yq d - metadata.resourceVersion + yq d - '**.enableServiceLinks' + yq d - '**.clusterIP' + yq d - '**.controller-uid' + yq d - '**.uid' + yq d - '**.preemptionPolicy' + yq d - spec.ipFamilies + yq d - '**.namespace' + yq d - status + yq d - '**.(name==NAMESPACE)' + yq d - '**.(name==S3_BUCKET_URL)' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.(name==suffix)' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - '**.creationTimestamp' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - spec.ipFamilyPolicy + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + /usr/bin/sed 's/namespace\:.*name/name/' + kubectl_bin get -o yaml statefulset/monitoring-pxc ++ mktemp + /usr/bin/sed s/monitoring-2-0-4736/namespace/g + local LAST_OUT=/tmp/tmp.ponemLHBiW ++ mktemp + local LAST_ERR=/tmp/tmp.YrUSZGOj0h + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl get -o yaml statefulset/monitoring-pxc + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.ponemLHBiW + cat /tmp/tmp.YrUSZGOj0h + rm /tmp/tmp.ponemLHBiW /tmp/tmp.YrUSZGOj0h + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc.yml /tmp/tmp.vUYUhOUyDQ/statefulset_monitoring-pxc.yml + compare_kubectl statefulset/monitoring-haproxy + local resource=statefulset/monitoring-haproxy + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy.yml + local new_result=/tmp/tmp.vUYUhOUyDQ/statefulset_monitoring-haproxy.yml + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-80.yml ']' + version_gt 1.22 ++ echo '1.20 >= 1.22' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.21 ++ echo '1.20 >= 1.21' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + kubectl_bin get -o yaml statefulset/monitoring-haproxy ++ mktemp + local LAST_OUT=/tmp/tmp.YfdXdCdrqJ + yq d - '**.namespace' + yq d - '**.uid' + yq d - metadata.resourceVersion + yq d - '**.env.(name==CLUSTER_HASH)' + yq d - 'metadata.annotations."cloud.google.com/neg"' ++ mktemp + local LAST_ERR=/tmp/tmp.jPwCVH3oTU + local exit_status=0 + yq d - '**.clusterIP' + yq d - '**.clusterIPs' + yq d - '**.dataSource' + yq d - '**.procMount' + yq d - '**."volume.beta.kubernetes.io/storage-provisioner"' + yq d - '**.volumeName' + yq d - '**."volume.kubernetes.io/storage-provisioner"' + yq d - '**."percona.com/*"' + yq d - '**."volume.kubernetes.io/selected-node"' + yq d - spec.volumeMode + yq d - spec.nodeName + yq d - '**.(name==percona-xtradb-cluster-operator-workload-token*)' + yq d - '**.creationTimestamp' + yq d - '**.image' ++ seq 0 2 + yq d - '**.storageClassName' + for i in '$(seq 0 2)' + kubectl get -o yaml statefulset/monitoring-haproxy + yq d - '**.finalizers' + yq d - '**.(volumeMode==Filesystem).volumeMode' + yq d - '**.nodePort' + yq d - '**.healthCheckNodePort' + yq d - '**.imagePullSecrets' + yq d - 'spec.volumeClaimTemplates.*.apiVersion' + yq d - '**.enableServiceLinks' + yq d - '**."kubernetes.io/pvc-protection"' + yq d - status + yq d - '**.(name==S3_BUCKET_URL)' + yq d - '**.(name==NAMESPACE)' + yq d - 'spec.volumeClaimTemplates.*.kind' + yq d - '**.(name==suffix)' + yq d - 'metadata.annotations."kubernetes.io/psp"' + yq d - '**.(name==S3_BUCKET_PATH)' + yq d - 'metadata.ownerReferences.*.apiVersion' + yq d - '**.controller-uid' + yq d - 'metadata.annotations."k8s.v1.cni.cncf.io*"' + yq d - '**.preemptionPolicy' + yq d - spec.ipFamilies + yq d - metadata.deletionTimestamp + yq d - spec.ipFamilyPolicy + /usr/bin/sed 's#^apiVersion: policy/v1beta1#apiVersion: policy/v1#' + /usr/bin/sed 's/name: kube-api-access-.*$/name: kube-api-access/' + yq d - metadata.selfLink + /usr/bin/sed 's/namespace\:.*name/name/' + yq d - '**.creationTimestamp' + /usr/bin/sed s/monitoring-2-0-4736/namespace/g + yq d - metadata.managedFields + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.YfdXdCdrqJ + cat /tmp/tmp.jPwCVH3oTU + rm /tmp/tmp.YfdXdCdrqJ /tmp/tmp.jPwCVH3oTU + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1125/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy.yml /tmp/tmp.vUYUhOUyDQ/statefulset_monitoring-haproxy.yml + desc 'check mysql metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mysql metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds monitoring-2-0-4736-monitoring-pxc-0 admin:admin + local metric=node_boot_time_seconds + local instance=monitoring-2-0-4736-monitoring-pxc-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1649764946 ++ 
/usr/bin/date -u +%s + local end=1649765006 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ jq '.status.loadBalancer.ingress[].hostname' +++ local LAST_OUT=/tmp/tmp.ZlwVcIoHUT ++++ mktemp +++ local LAST_ERR=/tmp/tmp.tJutSB3Wpu +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ cat /tmp/tmp.ZlwVcIoHUT +++ cat /tmp/tmp.tJutSB3Wpu +++ rm /tmp/tmp.ZlwVcIoHUT /tmp/tmp.tJutSB3Wpu +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.7M2yci4oQx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.iKVFmQ13xL +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ cat /tmp/tmp.7M2yci4oQx +++ cat /tmp/tmp.iKVFmQ13xL +++ rm /tmp/tmp.7M2yci4oQx /tmp/tmp.iKVFmQ13xL +++ return 0 ++ local ip=35.193.188.152 ++ '[' -n 35.193.188.152 -a 35.193.188.152 '!=' null ']' ++ echo 35.193.188.152 ++ return + local endpoint=35.193.188.152 + curl -s -k 'https://admin:admin@35.193.188.152/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-4736-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22monitoring-2-0-4736-monitoring-pxc-0%22%7D%29&start=1649764946&end=1649765006&step=60' + grep '^"[0-9]' + jq '.data.result[0].values[][1]' "1649755027" "1649755027" + get_metric_values mysql_global_status_uptime monitoring-2-0-4736-monitoring-pxc-0 admin:admin + local metric=mysql_global_status_uptime + local instance=monitoring-2-0-4736-monitoring-pxc-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1649764948 ++ /usr/bin/date -u +%s + local end=1649765008 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.i1egD12U9O ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QV2T7VuwUE +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ cat /tmp/tmp.i1egD12U9O +++ cat /tmp/tmp.QV2T7VuwUE +++ rm /tmp/tmp.i1egD12U9O /tmp/tmp.QV2T7VuwUE +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.j1M6KOwVyz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JlhMYFI5Wb +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ cat /tmp/tmp.j1M6KOwVyz +++ cat /tmp/tmp.JlhMYFI5Wb +++ rm /tmp/tmp.j1M6KOwVyz /tmp/tmp.JlhMYFI5Wb +++ return 0 ++ local ip=35.193.188.152 ++ '[' -n 35.193.188.152 -a 35.193.188.152 '!=' null ']' ++ echo 35.193.188.152 ++ return + local endpoint=35.193.188.152 + jq '.data.result[0].values[][1]' + curl -s -k 
'https://admin:admin@35.193.188.152/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mysql_global_status_uptime%7Bnode_name%3D%7E%22monitoring-2-0-4736-monitoring-pxc-0%22%7d%20or%20mysql_global_status_uptime%7Bnode_name%3D%7E%22monitoring-2-0-4736-monitoring-pxc-0%22%7D%29&start=1649764948&end=1649765008&step=60' + grep '^"[0-9]' "366" "426" + desc 'check haproxy metrics' + set +o xtrace ----------------------------------------------------------------------------------- check haproxy metrics ----------------------------------------------------------------------------------- + get_metric_values haproxy_backend_status monitoring-2-0-4736-monitoring-haproxy-0 admin:admin + local metric=haproxy_backend_status + local instance=monitoring-2-0-4736-monitoring-haproxy-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1649764950 ++ /usr/bin/date -u +%s + local end=1649765010 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ sed -e 's/^"//; s/"$//;' +++ jq '.status.loadBalancer.ingress[].hostname' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.NPrp91gnWl ++++ mktemp +++ local LAST_ERR=/tmp/tmp.IgozhCMCg2 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ cat /tmp/tmp.NPrp91gnWl +++ cat /tmp/tmp.IgozhCMCg2 +++ rm /tmp/tmp.NPrp91gnWl /tmp/tmp.IgozhCMCg2 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ jq '.status.loadBalancer.ingress[].ip' +++ local LAST_OUT=/tmp/tmp.weVGY1JLFv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KNF3W7xqCf +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ cat /tmp/tmp.weVGY1JLFv +++ cat /tmp/tmp.KNF3W7xqCf +++ rm /tmp/tmp.weVGY1JLFv /tmp/tmp.KNF3W7xqCf +++ return 0 ++ local ip=35.193.188.152 ++ '[' -n 35.193.188.152 -a 35.193.188.152 '!=' null ']' ++ echo 35.193.188.152 ++ return + local endpoint=35.193.188.152 + jq '.data.result[0].values[][1]' + curl -s -k 'https://admin:admin@35.193.188.152/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28haproxy_backend_status%7Bnode_name%3D%7E%22monitoring-2-0-4736-monitoring-haproxy-0%22%7d%20or%20haproxy_backend_status%7Bnode_name%3D%7E%22monitoring-2-0-4736-monitoring-haproxy-0%22%7D%29&start=1649764950&end=1649765010&step=60' + grep '^"[0-9]' "0" "0" + get_metric_values haproxy_backend_active_servers monitoring-2-0-4736-monitoring-haproxy-0 admin:admin + local metric=haproxy_backend_active_servers + local instance=monitoring-2-0-4736-monitoring-haproxy-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1649764952 ++ /usr/bin/date -u +%s + local end=1649765012 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UqdIA9awwT ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ZuGnJQkHvv +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ cat /tmp/tmp.UqdIA9awwT +++ cat /tmp/tmp.ZuGnJQkHvv +++ rm /tmp/tmp.UqdIA9awwT /tmp/tmp.ZuGnJQkHvv 
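get_metric_values, as seen in the 'check mysql metrics' and 'check haproxy metrics' blocks, resolves the monitoring-service LoadBalancer address and then issues a query_range request against PMM's Grafana datasource proxy, extracting the sample values with jq. A sketch of the same request; the endpoint, metric and instance values are the ones from this run, but curl's -G/--data-urlencode is used here instead of the hand-encoded URL the helper builds.

ENDPOINT=35.193.188.152              # monitoring-service LoadBalancer IP from this run
METRIC=haproxy_backend_active_servers
INSTANCE=monitoring-2-0-4736-monitoring-haproxy-0
start=$(/usr/bin/date -u +%s -d '-1 minute')
end=$(/usr/bin/date -u +%s)
curl -s -k -G "https://admin:admin@${ENDPOINT}/graph/api/datasources/proxy/1/api/v1/query_range" \
  --data-urlencode "query=min(${METRIC}{node_name=~\"${INSTANCE}\"})" \
  --data "start=${start}" --data "end=${end}" --data "step=60" \
  | jq '.data.result[0].values[][1]' \
  | grep '^"[0-9]'

Numeric samples on stdout (e.g. the "1" "1" printed above) are what the check looks for; an empty result would indicate the metric is not being scraped.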
+++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.CIUrAKQ9wn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pXRndtM9wk +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ cat /tmp/tmp.CIUrAKQ9wn +++ cat /tmp/tmp.pXRndtM9wk +++ rm /tmp/tmp.CIUrAKQ9wn /tmp/tmp.pXRndtM9wk +++ return 0 ++ local ip=35.193.188.152 ++ '[' -n 35.193.188.152 -a 35.193.188.152 '!=' null ']' ++ echo 35.193.188.152 ++ return + local endpoint=35.193.188.152 + curl -s -k 'https://admin:admin@35.193.188.152/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28haproxy_backend_active_servers%7Bnode_name%3D%7E%22monitoring-2-0-4736-monitoring-haproxy-0%22%7d%20or%20haproxy_backend_active_servers%7Bnode_name%3D%7E%22monitoring-2-0-4736-monitoring-haproxy-0%22%7D%29&start=1649764952&end=1649765012&step=60' + grep '^"[0-9]' + jq '.data.result[0].values[][1]' "1" "1" + sleep 90 + desc 'check QAN data' + set +o xtrace ----------------------------------------------------------------------------------- check QAN data ----------------------------------------------------------------------------------- + get_qan20_values monitoring-pxc-0 admin:admin + local instance=monitoring-pxc-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S -d '-30 minute' + local start=2022-04-12T11:35:04 ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S + local end=2022-04-12T12:05:04 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ jq '.status.loadBalancer.ingress[].hostname' +++ local LAST_OUT=/tmp/tmp.TDTiPz3pVg ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MXJG6Io1jZ +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ cat /tmp/tmp.TDTiPz3pVg +++ cat /tmp/tmp.MXJG6Io1jZ +++ rm /tmp/tmp.TDTiPz3pVg /tmp/tmp.MXJG6Io1jZ +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.pl50EVJVOG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gWnp7tmgUh +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ [[ 0 != 0 ]] +++ break +++ cat /tmp/tmp.pl50EVJVOG +++ cat /tmp/tmp.gWnp7tmgUh +++ rm /tmp/tmp.pl50EVJVOG /tmp/tmp.gWnp7tmgUh +++ return 0 ++ local ip=35.193.188.152 ++ '[' -n 35.193.188.152 -a 35.193.188.152 '!=' null ']' ++ echo 35.193.188.152 ++ return + local endpoint=35.193.188.152 + cat ++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z + curl -s -k -XPOST -d @payload.json https://admin:admin@35.193.188.152/v0/qan/GetReport + jq '.rows[].fingerprint' null + rm -f payload.json + [[ -n '' ]] + helm uninstall monitoring release "monitoring" uninstalled + destroy monitoring-2-0-4736 + local namespace=monitoring-2-0-4736 + local ignore_logs=false + [[ false == \f\a\l\s\e ]] + grep -v level=info + grep -v 'the object has been modified' ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ + grep -v 'get backup status: 
Job.batch' + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + sort -u + tee /tmp/tmp.vUYUhOUyDQ/operator.log +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.mV962jocH4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.15EReVe6oi ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ [[ 0 != 0 ]] ++ break ++ cat /tmp/tmp.mV962jocH4 ++ cat /tmp/tmp.15EReVe6oi ++ rm /tmp/tmp.mV962jocH4 /tmp/tmp.15EReVe6oi ++ return 0 + kubectl_bin logs -n pxc-operator percona-xtradb-cluster-operator-5699d7755d-6b2g7 ++ mktemp + local LAST_OUT=/tmp/tmp.4CNhtAFRVC ++ mktemp + local LAST_ERR=/tmp/tmp.KAUA4xTZ26 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl logs -n pxc-operator percona-xtradb-cluster-operator-5699d7755d-6b2g7 + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.4CNhtAFRVC + cat /tmp/tmp.KAUA4xTZ26 + rm /tmp/tmp.4CNhtAFRVC /tmp/tmp.KAUA4xTZ26 + return 0 I0412 11:53:50.210221 1 request.go:645] Throttling request took 1.044751987s, request: GET:https://10.7.240.1:443/apis/networking.gke.io/v1beta2?timeout=32s {"level":"info",,"caller":"pxc/version.go:328","msg":"update PXC version (fetched from db)","new version":"8.0.27-18.1"} {"level":"info",,"caller":"v1/pxc_types.go:874","msg":"HAProxy size will be changed from 1 to 2 due to safe config"} {"level":"info",,"caller":"v1/pxc_types.go:875","msg":"Set allowUnsafeConfigurations=true to disable safe configuration"} {"level":"info",,"logger":"cmd","msg":"Git commit: 706f792ae47c369cb3556faff186b6873a8a247f Git branch: PR-1125-706f792a Build time: 2022-04-12T09:09:41Z"} {"level":"info",,"logger":"cmd","msg":"Go OS/Arch: linux/amd64"} {"level":"info",,"logger":"cmd","msg":"Go Version: go1.17.8"} {"level":"info",,"logger":"cmd","msg":"operator-sdk Version: v0.19.4"} {"level":"info",,"logger":"cmd","msg":"Registering Components."} {"level":"info",,"logger":"cmd","msg":"Runs on","platform":"kubernetes","version":"v1.20.15-gke.4100"} {"level":"info",,"logger":"cmd","msg":"Starting the Cmd."} {"level":"info",,"logger":"controller-runtime.certwatcher","msg":"Starting certificate watcher"} {"level":"info",,"logger":"controller-runtime.certwatcher","msg":"Updated current TLS certificate"} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterbackup-controller","msg":"Starting Controller"} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterbackup-controller","msg":"Starting EventSource","source":"kind source: /, Kind="} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterbackup-controller","msg":"Starting workers","worker count":1} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbcluster-controller","msg":"Starting Controller"} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbcluster-controller","msg":"Starting EventSource","source":"kind source: /, Kind="} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbcluster-controller","msg":"Starting workers","worker count":1} 
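During destroy the operator log is preserved for the test artifacts: the kubectl logs output is stripped of noisy lines and timestamps, de-duplicated, and written next to the other temp files. Assembled from the exact filters shown in this trace (the pod name and output directory are specific to this run):

kubectl logs -n pxc-operator percona-xtradb-cluster-operator-5699d7755d-6b2g7 \
  | grep -v level=info \
  | grep -v 'the object has been modified' \
  | grep -v 'get backup status: Job.batch' \
  | /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' \
  | sort -u \
  | tee /tmp/tmp.vUYUhOUyDQ/operator.log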
{"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterrestore-controller","msg":"Starting Controller"} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterrestore-controller","msg":"Starting EventSource","source":"kind source: /, Kind="} {"level":"info",,"logger":"controller-runtime.manager.controller.perconaxtradbclusterrestore-controller","msg":"Starting workers","worker count":1} {"level":"info",,"logger":"controller-runtime.manager","msg":"starting metrics server","path":"/metrics"} {"level":"info",,"logger":"controller-runtime.metrics","msg":"metrics server is starting to listen","addr":":8080"} {"level":"info",,"logger":"controller-runtime.webhook","msg":"registering webhook","path":"/validate-percona-xtradbcluster"} {"level":"info",,"logger":"controller-runtime.webhook","msg":"serving webhook server","host":"","port":9443} {"level":"info",,"logger":"controller-runtime.webhook.webhooks","msg":"starting webhook server"} {"level":"info",,"logger":"leader","msg":"Became the leader."} {"level":"info",,"logger":"leader","msg":"No pre-existing lock was found."} {"level":"info",,"logger":"leader","msg":"Trying to become the leader."} + grep -v NAMESPACE + kubectl get pxc --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n monitoring-2-0-4736 monitoring --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/monitoring patched (no change) + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.4dYZMckyW1 ++ mktemp + local LAST_ERR=/tmp/tmp.jbjQqmIicr + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.4dYZMckyW1 perconaxtradbcluster.pxc.percona.com "monitoring" deleted + cat /tmp/tmp.jbjQqmIicr + rm /tmp/tmp.4dYZMckyW1 /tmp/tmp.jbjQqmIicr + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.T0FsgGEQ5l ++ mktemp + local LAST_ERR=/tmp/tmp.dEiI76hVeb + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.T0FsgGEQ5l No resources found + cat /tmp/tmp.dEiI76hVeb + rm /tmp/tmp.T0FsgGEQ5l /tmp/tmp.dEiI76hVeb + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.y1fYFw8c6h ++ mktemp + local LAST_ERR=/tmp/tmp.zoweuIEnIF + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.y1fYFw8c6h No resources found + cat /tmp/tmp.zoweuIEnIF + rm /tmp/tmp.y1fYFw8c6h /tmp/tmp.zoweuIEnIF + return 0 + kubectl_bin delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook ++ mktemp + local LAST_OUT=/tmp/tmp.ePSPZm2dHM ++ mktemp + local LAST_ERR=/tmp/tmp.b7wcXJqrpY + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook + exit_status=0 + [[ 0 != 0 ]] + break + cat /tmp/tmp.ePSPZm2dHM validatingwebhookconfiguration.admissionregistration.k8s.io "percona-xtradbcluster-webhook" deleted + cat /tmp/tmp.b7wcXJqrpY + rm /tmp/tmp.ePSPZm2dHM /tmp/tmp.b7wcXJqrpY + return 0 + kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v1.5.4/cert-manager.yaml + : + '[' '!' 
-z '' ']' + '[' -n pxc-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace monitoring-2-0-4736 + rm -rf /tmp/tmp.vUYUhOUyDQ ++ mktemp + local LAST_OUT=/tmp/tmp.IYDyHFpttk + kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.96smSJyEWx ++ mktemp + local LAST_ERR=/tmp/tmp.H0x3UpMb5B + local exit_status=0 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.x4sSF6iFep + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + kubectl delete --grace-period=0 --force=true namespace monitoring-2-0-4736 + for i in '$(seq 0 2)' + kubectl delete --grace-period=0 --force=true namespace pxc-operator
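Every kubectl call in this trace goes through the kubectl_bin wrapper: stdout and stderr are captured in mktemp files, the command is retried up to three times, and the captured output is echoed back before the temp files are removed. The helper's source is not part of this log; the following is a reconstruction of the pattern visible in the trace and may differ from the real function.

kubectl_bin() {
    local LAST_OUT LAST_ERR
    local exit_status=0
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        kubectl "$@" 1>"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        # the trace shows an immediate break on success and another attempt otherwise
        [[ $exit_status != 0 ]] && continue
        break
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR"
    rm -f "$LAST_OUT" "$LAST_ERR"
    return $exit_status
}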