Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/logs/monitoring-2-0-8-0.log WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 + cluster=monitoring + create_infra monitoring-2-0-6342 + local ns=monitoring-2-0-6342 + '[' -n pxc-operator ']' + grep -v NAMESPACE + kubectl get pxc --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n monitoring-2-0-25312 monitoring --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/monitoring patched (no change) + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.CrjvNCNjLK ++ mktemp + local LAST_ERR=/tmp/tmp.YMiFQDUqGn + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.CrjvNCNjLK perconaxtradbcluster.pxc.percona.com "monitoring" deleted + cat /tmp/tmp.YMiFQDUqGn + rm /tmp/tmp.CrjvNCNjLK /tmp/tmp.YMiFQDUqGn + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.SKcCQD1YTB ++ mktemp + local LAST_ERR=/tmp/tmp.4iJgAkciK6 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.SKcCQD1YTB No resources found + cat /tmp/tmp.4iJgAkciK6 + rm /tmp/tmp.SKcCQD1YTB /tmp/tmp.4iJgAkciK6 + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.qgeoUwDbk6 ++ mktemp + local LAST_ERR=/tmp/tmp.9AS2wLTRQN + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.qgeoUwDbk6 No resources found + cat /tmp/tmp.9AS2wLTRQN + rm /tmp/tmp.qgeoUwDbk6 /tmp/tmp.9AS2wLTRQN + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ awk '{print $1}' ++ grep chaos-mesh.org ++ kubectl get crd + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete 
clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + set +o xtrace ++ mktemp ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator + local LAST_OUT=/tmp/tmp.yiMLGRXl2m ++ mktemp ++ mktemp + xargs kubectl delete ns + local LAST_ERR=/tmp/tmp.jJYg9VrSqM + local exit_status=0 + local LAST_OUT=/tmp/tmp.0FuWcHSwFt ++ mktemp + local LAST_ERR=/tmp/tmp.h63dFHnA01 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.yiMLGRXl2m + cat /tmp/tmp.jJYg9VrSqM + rm /tmp/tmp.yiMLGRXl2m /tmp/tmp.jJYg9VrSqM + return 0 namespace "monitoring-2-0-25312" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.0FuWcHSwFt namespace "pxc-operator" deleted + cat /tmp/tmp.h63dFHnA01 + rm /tmp/tmp.0FuWcHSwFt /tmp/tmp.h63dFHnA01 + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'namespace/pxc-operator - ' namespace/pxc-operator - + set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.mrNY7HC9qV ++ mktemp + local LAST_ERR=/tmp/tmp.DnQYzQpXCG + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.mrNY7HC9qV namespace/pxc-operator created + cat /tmp/tmp.DnQYzQpXCG + rm /tmp/tmp.mrNY7HC9qV /tmp/tmp.DnQYzQpXCG + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.nCYpA27Dh4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.2Pez09paIB ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.nCYpA27Dh4 ++ cat /tmp/tmp.2Pez09paIB ++ rm /tmp/tmp.nCYpA27Dh4 /tmp/tmp.2Pez09paIB ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1712-e95e769e-1-cluster8 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.dIQBhzcmG6 ++ mktemp + local LAST_ERR=/tmp/tmp.GYflfUvXdX + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1712-e95e769e-1-cluster8 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.dIQBhzcmG6 Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1712-e95e769e-1-cluster8" 
modified. + cat /tmp/tmp.GYflfUvXdX + rm /tmp/tmp.dIQBhzcmG6 /tmp/tmp.GYflfUvXdX + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.Vb1mBLmsqh ++ mktemp + local LAST_ERR=/tmp/tmp.tfAIP5oaWv + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Vb1mBLmsqh customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.tfAIP5oaWv + rm /tmp/tmp.Vb1mBLmsqh /tmp/tmp.tfAIP5oaWv + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.PpxwnQ0mFR ++ mktemp + local LAST_ERR=/tmp/tmp.XcFpY09s4F + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.PpxwnQ0mFR clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.XcFpY09s4F + rm /tmp/tmp.PpxwnQ0mFR /tmp/tmp.XcFpY09s4F + return 0 + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-1712-e95e769e^' + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "DEBUG"' - + kubectl_bin apply -f - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.xi1jLQdFgo ++ mktemp + local LAST_ERR=/tmp/tmp.rQ6ptwcAlK + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.xi1jLQdFgo deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.rQ6ptwcAlK + rm /tmp/tmp.xi1jLQdFgo /tmp/tmp.rQ6ptwcAlK + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + local LAST_OUT=/tmp/tmp.vRAbgLGo54 ++ mktemp + local LAST_ERR=/tmp/tmp.yeN89028EW + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready 
pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.vRAbgLGo54 pod/percona-xtradb-cluster-operator-79c4bc5dcb-fbfsw condition met + cat /tmp/tmp.yeN89028EW + rm /tmp/tmp.vRAbgLGo54 /tmp/tmp.yeN89028EW + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.dwnx0EgiII +++ mktemp ++ local LAST_ERR=/tmp/tmp.dhFcrlYYGo ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.dwnx0EgiII ++ cat /tmp/tmp.dhFcrlYYGo ++ rm /tmp/tmp.dwnx0EgiII /tmp/tmp.dhFcrlYYGo ++ return 0 + wait_pod percona-xtradb-cluster-operator-79c4bc5dcb-fbfsw 480 pxc-operator + local pod=percona-xtradb-cluster-operator-79c4bc5dcb-fbfsw + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-79c4bc5dcb-fbfsw ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-79c4bc5dcb-fbfsw condition met percona-xtradb-cluster-operator-79c4bc5dcb-fbfsw.Ok + sleep 3 + create_namespace monitoring-2-0-6342 + local namespace=monitoring-2-0-6342 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh.org ++ kubectl get crd ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get clusterrole ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + desc 
'cleaned up old namespaces monitoring-2-0-6342' + set +o xtrace ----------------------------------------------------------------------------------- + xargs kubectl delete ns cleaned up old namespaces monitoring-2-0-6342 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace monitoring-2-0-6342 ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.vBDQrWvYS9 ++ mktemp + local LAST_ERR=/tmp/tmp.SVgu6MG7cF + local exit_status=0 + local LAST_OUT=/tmp/tmp.kNgwHl2oP9 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns ++ mktemp + local LAST_ERR=/tmp/tmp.oXo4Olh1AX + local exit_status=0 ++ seq 0 2 + awk '{print$1}' + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-2-0-6342 + egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-2-0-6342 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.vBDQrWvYS9 + cat /tmp/tmp.SVgu6MG7cF + rm /tmp/tmp.vBDQrWvYS9 /tmp/tmp.SVgu6MG7cF + return 0 Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-2-0-6342 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.kNgwHl2oP9 + cat /tmp/tmp.oXo4Olh1AX Error from server (NotFound): namespaces "monitoring-2-0-6342" not found + rm /tmp/tmp.kNgwHl2oP9 /tmp/tmp.oXo4Olh1AX + return 1 + : + wait_for_delete namespace/monitoring-2-0-6342 + local res=namespace/monitoring-2-0-6342 + echo -n 'namespace/monitoring-2-0-6342 - ' namespace/monitoring-2-0-6342 - + set +o xtrace Error from server (NotFound): namespaces "monitoring-2-0-6342" not found + desc 'create namespace monitoring-2-0-6342' + set +o xtrace ----------------------------------------------------------------------------------- create namespace monitoring-2-0-6342 ----------------------------------------------------------------------------------- + kubectl_bin create namespace monitoring-2-0-6342 ++ mktemp + local LAST_OUT=/tmp/tmp.H7Ke94xMJJ ++ mktemp + local LAST_ERR=/tmp/tmp.vwkeQUatml + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace monitoring-2-0-6342 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.H7Ke94xMJJ namespace/monitoring-2-0-6342 created + cat /tmp/tmp.vwkeQUatml + rm /tmp/tmp.H7Ke94xMJJ /tmp/tmp.vwkeQUatml + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.WWItSZ6pM2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Cd3N4eRX6l ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.WWItSZ6pM2 ++ cat /tmp/tmp.Cd3N4eRX6l ++ rm /tmp/tmp.WWItSZ6pM2 /tmp/tmp.Cd3N4eRX6l ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1712-e95e769e-1-cluster8 --namespace=monitoring-2-0-6342 ++ mktemp + local LAST_OUT=/tmp/tmp.oCYUyq1rIf ++ mktemp + local LAST_ERR=/tmp/tmp.D1wqI2z252 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1712-e95e769e-1-cluster8 --namespace=monitoring-2-0-6342 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.oCYUyq1rIf Context 
"gke_cloud-dev-112233_us-central1-a_jen-pxc-1712-e95e769e-1-cluster8" modified. + cat /tmp/tmp.D1wqI2z252 + rm /tmp/tmp.oCYUyq1rIf /tmp/tmp.D1wqI2z252 + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.DeHMGDEJ2O ++ mktemp + local LAST_ERR=/tmp/tmp.4xLPfEAihW + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.DeHMGDEJ2O secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.4xLPfEAihW + rm /tmp/tmp.DeHMGDEJ2O /tmp/tmp.4xLPfEAihW + return 0 + deploy_helm monitoring-2-0-6342 + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" already exists with the same configuration, skipping + helm repo add minio https://charts.min.io/ "minio" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "percona" chart repository ...Successfully got an update from the "chaos-mesh" chart repository ...Successfully got an update from the "hashicorp" chart repository Update Complete. ⎈Happy Helming!⎈ + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + deploy_pmm_server + '[' '!' -z '' ']' + helm repo add percona https://percona.github.io/percona-helm-charts/ "percona" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "chaos-mesh" chart repository ...Successfully got an update from the "hashicorp" chart repository ...Successfully got an update from the "percona" chart repository Update Complete. 
⎈Happy Helming!⎈ + helm uninstall monitoring Error: uninstall: Release not loaded: monitoring: release: not found + : + retry 10 60 helm install monitoring --set imageRepo=perconalab/pmm-server --set imageTag=dev-latest https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz + local max=10 + local delay=60 + shift 2 + local n=1 + helm install monitoring --set imageRepo=perconalab/pmm-server --set imageTag=dev-latest https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz NAME: monitoring LAST DEPLOYED: Thu May 16 02:12:12 2024 NAMESPACE: monitoring-2-0-6342 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster: endpoint: https://monitoring-service.monitoring-2-0-6342.svc.cluster.local:443 login: admin password: admin + kubectl_bin wait --for=condition=Ready pod/monitoring-0 --timeout=120s ++ mktemp + local LAST_OUT=/tmp/tmp.zdqNeLJQ7G ++ mktemp + local LAST_ERR=/tmp/tmp.bKgkmC2Mem + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod/monitoring-0 --timeout=120s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.zdqNeLJQ7G pod/monitoring-0 condition met + cat /tmp/tmp.bKgkmC2Mem + rm /tmp/tmp.zdqNeLJQ7G /tmp/tmp.bKgkmC2Mem + return 0 + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.8f2Ab6jOML ++ mktemp + local LAST_ERR=/tmp/tmp.4Eqg7vTvve + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.8f2Ab6jOML + cat /tmp/tmp.4Eqg7vTvve + rm /tmp/tmp.8f2Ab6jOML /tmp/tmp.4Eqg7vTvve + return 0 ++ kubectl_bin exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Xat4Jmu9FV +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZKmpOif4Jh ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Xat4Jmu9FV ++ cat /tmp/tmp.ZKmpOif4Jh ++ rm /tmp/tmp.Xat4Jmu9FV /tmp/tmp.ZKmpOif4Jh ++ return 0 + ADMIN_PASSWORD=admin + sleep 5 + kubectl_bin exec monitoring-0 -- bash -c 'grafana-cli --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini admin reset-admin-password admin' ++ mktemp + local LAST_OUT=/tmp/tmp.Y8QBSJxqIN ++ mktemp + local LAST_ERR=/tmp/tmp.RwoxXqkWQd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'grafana-cli --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini admin reset-admin-password admin' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Y8QBSJxqIN logger=settings t=2024-05-16T02:13:18.952484868Z level=info msg="Starting Grafana" version= commit= branch= compiled=1970-01-01T00:00:00Z logger=settings t=2024-05-16T02:13:18.952677307Z level=info msg="Config loaded from" file=/usr/share/grafana/conf/defaults.ini logger=settings t=2024-05-16T02:13:18.952696718Z level=info msg="Config loaded from" file=/etc/grafana/grafana.ini logger=settings t=2024-05-16T02:13:18.952703797Z level=info msg="Path Home" path=/usr/share/grafana logger=settings t=2024-05-16T02:13:18.952723959Z level=info msg="Path Data" path=/srv/grafana logger=settings 
t=2024-05-16T02:13:18.952730491Z level=info msg="Path Logs" path=/srv/logs logger=settings t=2024-05-16T02:13:18.9527369Z level=info msg="Path Plugins" path=/srv/grafana/plugins logger=settings t=2024-05-16T02:13:18.952743277Z level=info msg="Path Provisioning" path=/usr/share/grafana/conf/provisioning logger=settings t=2024-05-16T02:13:18.952750802Z level=info msg="App mode production" logger=sqlstore t=2024-05-16T02:13:18.952845824Z level=info msg="Connecting to DB" dbtype=postgres logger=migrator t=2024-05-16T02:13:18.977963352Z level=info msg="Starting DB migrations" logger=migrator t=2024-05-16T02:13:18.98359145Z level=info msg="migrations completed" performed=0 skipped=452 duration=655.531µs logger=secrets t=2024-05-16T02:13:18.985454107Z level=info msg="Envelope encryption state" enabled=true currentprovider=secretKey.v1 logger=plugin.finder t=2024-05-16T02:13:19.02780569Z level=warn msg="Skipping finding plugins as directory does not exist" path=/usr/share/grafana/plugins-bundled logger=plugin.signature.validator t=2024-05-16T02:13:19.292857479Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-pt-summary-datasource pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-pt-summary/datasource logger=plugin.signature.validator t=2024-05-16T02:13:19.292897561Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-pt-summary-panel pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-pt-summary/panel logger=plugin.signature.validator t=2024-05-16T02:13:19.29292282Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=grafana-polystat-panel pluginDir=/srv/grafana/plugins/grafana-polystat-panel logger=plugin.signature.validator t=2024-05-16T02:13:19.292933061Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-app pluginDir=/srv/grafana/plugins/pmm-app/dist logger=plugin.pmm-app t=2024-05-16T02:13:19.302578966Z level=warn msg="Included dashboard is missing a UID field" logger=plugin.signature.validator t=2024-05-16T02:13:19.302955864Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-qan-app-panel pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-qan logger=plugin.loader t=2024-05-16T02:13:19.303062976Z level=info msg="Plugin registered" pluginID=petrslavotinek-carpetplot-panel logger=plugin.loader t=2024-05-16T02:13:19.3030813Z level=info msg="Plugin registered" pluginID=grafana-piechart-panel logger=plugin.loader t=2024-05-16T02:13:19.303089853Z level=info msg="Plugin registered" pluginID=jdbranham-diagram-panel logger=plugin.loader t=2024-05-16T02:13:19.303096874Z level=info msg="Plugin registered" pluginID=pmm-pt-summary-datasource logger=plugin.loader t=2024-05-16T02:13:19.303104343Z level=info msg="Plugin registered" pluginID=pmm-pt-summary-panel logger=plugin.loader t=2024-05-16T02:13:19.303111261Z level=info msg="Plugin registered" pluginID=grafana-clickhouse-datasource logger=plugin.grafana-clickhouse-datasource t=2024-05-16T02:13:19.318164822Z level=warn msg="Plugin process is running with elevated privileges. 
This is not recommended" logger=plugin.loader t=2024-05-16T02:13:19.318203845Z level=info msg="Plugin registered" pluginID=grafana-worldmap-panel logger=plugin.loader t=2024-05-16T02:13:19.318214411Z level=info msg="Plugin registered" pluginID=grafana-polystat-panel logger=plugin.loader t=2024-05-16T02:13:19.318222873Z level=info msg="Plugin registered" pluginID=pmm-app logger=plugin.loader t=2024-05-16T02:13:19.318233496Z level=info msg="Plugin registered" pluginID=camptocamp-prometheus-alertmanager-datasource logger=plugin.loader t=2024-05-16T02:13:19.318241755Z level=info msg="Plugin registered" pluginID=natel-discrete-panel logger=plugin.loader t=2024-05-16T02:13:19.318250516Z level=info msg="Plugin registered" pluginID=pmm-qan-app-panel Admin password changed successfully ✔ + cat /tmp/tmp.RwoxXqkWQd + rm /tmp/tmp.Y8QBSJxqIN /tmp/tmp.RwoxXqkWQd + return 0 + desc 'create PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PXC cluster ----------------------------------------------------------------------------------- + spinup_pxc monitoring /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/conf/monitoring.yml 3 120 /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/conf/secrets.yaml + local cluster=monitoring + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/conf/monitoring.yml + local size=3 + local sleep=120 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/conf/secrets.yaml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/conf/secrets.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.Bjgq0iuQBS ++ mktemp + local LAST_ERR=/tmp/tmp.3aNbWi12uk + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/conf/secrets.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Bjgq0iuQBS secret/my-cluster-secrets created + cat /tmp/tmp.3aNbWi12uk + rm /tmp/tmp.Bjgq0iuQBS /tmp/tmp.3aNbWi12uk + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/conf/client.yml + kubectl_bin apply -f - + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.monitoring-2-0-6342~ + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 
's#apply:.*#apply: Never#' ++ mktemp + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1712-e95e769e#' + local LAST_OUT=/tmp/tmp.uAk0FQB69B ++ mktemp + local LAST_ERR=/tmp/tmp.qU6CzYEMhn + local exit_status=0 ++ seq 0 2 + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.uAk0FQB69B deployment.apps/pxc-client created + cat /tmp/tmp.qU6CzYEMhn + rm /tmp/tmp.uAk0FQB69B /tmp/tmp.qU6CzYEMhn + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/conf/monitoring.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/conf/monitoring.yml ++ mktemp + local LAST_OUT=/tmp/tmp.Gsv1hCYYJe + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1712-e95e769e#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' ++ mktemp + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/conf/monitoring.yml + /usr/bin/sed -e s~minio-service.#namespace~minio-service.monitoring-2-0-6342~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' + local LAST_ERR=/tmp/tmp.kmFUtFlIIO + local exit_status=0 ++ seq 0 2 + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Gsv1hCYYJe perconaxtradbcluster.pxc.percona.com/monitoring created + cat /tmp/tmp.kmFUtFlIIO + rm /tmp/tmp.Gsv1hCYYJe /tmp/tmp.kmFUtFlIIO + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy monitoring ++ local target_cluster=monitoring +++ kubectl_bin get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ya8qqndQ5A ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9Yw99FX2Fr +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.ya8qqndQ5A +++ cat /tmp/tmp.9Yw99FX2Fr +++ rm /tmp/tmp.ya8qqndQ5A /tmp/tmp.9Yw99FX2Fr +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo monitoring-haproxy ++ return + local proxy=monitoring-haproxy + 
kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-2-0-6342 ++ mktemp + local LAST_OUT=/tmp/tmp.E24tHFNEVY ++ mktemp + local LAST_ERR=/tmp/tmp.9HocRb2hyV + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-2-0-6342 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-2-0-6342 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-2-0-6342 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.E24tHFNEVY + cat /tmp/tmp.9HocRb2hyV error: no matching resources found + rm /tmp/tmp.E24tHFNEVY /tmp/tmp.9HocRb2hyV + return 1 + true + wait_for_running monitoring-haproxy 1 + local name=monitoring-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-haproxy-0 480 + local pod=monitoring-haproxy-0 + local max_retry=480 + local ns= ++ echo monitoring-haproxy-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/monitoring-haproxy-0 condition met monitoring-haproxy-0Defaulted container "pmm-client" out of: pmm-client, haproxy, pxc-monit, pxc-init (init) .Ok + wait_for_running monitoring-pxc 3 + local name=monitoring-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-0 480 + local pod=monitoring-pxc-0 + local max_retry=480 + local ns= ++ echo monitoring-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/monitoring-pxc-0 condition met monitoring-pxc-0.Ok + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-1 480 + local pod=monitoring-pxc-1 + local max_retry=480 + local ns= ++ echo monitoring-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/monitoring-pxc-1 condition met monitoring-pxc-1.Ok + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-2 480 + local pod=monitoring-pxc-2 + local max_retry=480 + local ns= ++ echo monitoring-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/monitoring-pxc-2 condition met monitoring-pxc-2.Ok + sleep 120 + desc 'write data' + set +o xtrace 
----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h monitoring-haproxy -uroot -proot_password -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h monitoring-haproxy -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NkWqaLMEfa +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZNLrSp9bZO ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.NkWqaLMEfa ++ cat /tmp/tmp.ZNLrSp9bZO ++ rm /tmp/tmp.NkWqaLMEfa /tmp/tmp.ZNLrSp9bZO ++ return 0 + client_pod=pxc-client-6644d8898f-5f9mn + wait_pod pxc-client-6644d8898f-5f9mn + local pod=pxc-client-6644d8898f-5f9mn + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-5f9mn ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-5f9mn condition met pxc-client-6644d8898f-5f9mn.Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h monitoring-haproxy -uroot -proot_password -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h monitoring-haproxy -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vmIdgbfO2O +++ mktemp ++ local LAST_ERR=/tmp/tmp.yvkQiYIPzq ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.vmIdgbfO2O ++ cat /tmp/tmp.yvkQiYIPzq ++ rm /tmp/tmp.vmIdgbfO2O /tmp/tmp.yvkQiYIPzq ++ return 0 + client_pod=pxc-client-6644d8898f-5f9mn + wait_pod pxc-client-6644d8898f-5f9mn + local pod=pxc-client-6644d8898f-5f9mn + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-5f9mn ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-5f9mn condition met pxc-client-6644d8898f-5f9mn.Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-0.monitoring-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-0.monitoring-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-0.monitoring-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-0.monitoring-pxc 
-uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MNnNCYO0hW +++ mktemp ++ local LAST_ERR=/tmp/tmp.RdpeEPTT3Y ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.MNnNCYO0hW ++ cat /tmp/tmp.RdpeEPTT3Y ++ rm /tmp/tmp.MNnNCYO0hW /tmp/tmp.RdpeEPTT3Y ++ return 0 + client_pod=pxc-client-6644d8898f-5f9mn + wait_pod pxc-client-6644d8898f-5f9mn + local pod=pxc-client-6644d8898f-5f9mn + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-5f9mn ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-5f9mn condition met pxc-client-6644d8898f-5f9mn.Ok + set +o xtrace + '[' '!' -s /tmp/tmp.Ku8J43Hwq1/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/select-1.sql /tmp/tmp.Ku8J43Hwq1/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-1.monitoring-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-1.monitoring-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-1.monitoring-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-1.monitoring-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.GbPQJxi5rj +++ mktemp ++ local LAST_ERR=/tmp/tmp.dqLShUPJPI ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.GbPQJxi5rj ++ cat /tmp/tmp.dqLShUPJPI ++ rm /tmp/tmp.GbPQJxi5rj /tmp/tmp.dqLShUPJPI ++ return 0 + client_pod=pxc-client-6644d8898f-5f9mn + wait_pod pxc-client-6644d8898f-5f9mn + local pod=pxc-client-6644d8898f-5f9mn + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-5f9mn ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-5f9mn condition met pxc-client-6644d8898f-5f9mn.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.Ku8J43Hwq1/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/select-1.sql /tmp/tmp.Ku8J43Hwq1/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-2.monitoring-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-2.monitoring-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-2.monitoring-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-2.monitoring-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.x9ClSJt4Lk +++ mktemp ++ local LAST_ERR=/tmp/tmp.ymvfQ9dWcT ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.x9ClSJt4Lk ++ cat /tmp/tmp.ymvfQ9dWcT ++ rm /tmp/tmp.x9ClSJt4Lk /tmp/tmp.ymvfQ9dWcT ++ return 0 + client_pod=pxc-client-6644d8898f-5f9mn + wait_pod pxc-client-6644d8898f-5f9mn + local pod=pxc-client-6644d8898f-5f9mn + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ echo pxc-client-6644d8898f-5f9mn ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-6644d8898f-5f9mn condition met pxc-client-6644d8898f-5f9mn.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.Ku8J43Hwq1/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/select-1.sql /tmp/tmp.Ku8J43Hwq1/select-1.sql ++ is_keyring_plugin_in_use monitoring ++ local cluster=monitoring ++ kubectl_bin exec -it monitoring-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PWVGClXyis +++ mktemp ++ local LAST_ERR=/tmp/tmp.KYgk3bzjvC ++ local exit_status=0 +++ seq 0 2 ++ egrep -o 'early-plugin-load=keyring_\w+.so' ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it monitoring-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.PWVGClXyis ++ cat /tmp/tmp.KYgk3bzjvC Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.PWVGClXyis /tmp/tmp.KYgk3bzjvC ++ return 0 + '[' '' ']' + desc 'add PMM API key to secret' + set +o xtrace ----------------------------------------------------------------------------------- add PMM API key to secret ----------------------------------------------------------------------------------- ++ jq .key +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.fJFYluMzzx +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.7IsExatx4e ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.fJFYluMzzx ++++ cat /tmp/tmp.7IsExatx4e ++++ rm /tmp/tmp.fJFYluMzzx /tmp/tmp.7IsExatx4e ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.xYWWFFbrNL +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.ixujJGWefX ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.xYWWFFbrNL ++++ cat /tmp/tmp.ixujJGWefX ++++ rm /tmp/tmp.xYWWFFbrNL /tmp/tmp.ixujJGWefX ++++ return 0 +++ local ip=35.225.25.173 +++ '[' -n 35.225.25.173 -a 35.225.25.173 '!=' null ']' +++ echo 35.225.25.173 +++ return ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@35.225.25.173/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 155 100 119 100 36 260 78 --:--:-- --:--:-- --:--:-- 339 + API_KEY='"eyJrIjoiWHFDT01NYTRZZFFwdmxrWWwyMDFvS3N1c2h0c1BmbGciLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="' + kubectl_bin patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoiWHFDT01NYTRZZFFwdmxrWWwyMDFvS3N1c2h0c1BmbGciLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' ++ mktemp + local LAST_OUT=/tmp/tmp.A9emnscLqu ++ mktemp + local LAST_ERR=/tmp/tmp.upXKglmRMp + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoiWHFDT01NYTRZZFFwdmxrWWwyMDFvS3N1c2h0c1BmbGciLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break 
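
In short, the PMM API-key provisioning traced above (and the secret patch whose output follows) boils down to the sequence below. This is a condensed sketch of what the trace shows, not the test helper itself: the real helper retries every kubectl call, falls back between the LoadBalancer hostname and IP, and keeps the raw jq quoting of the key.

  # Resolve the exposed PMM endpoint (LoadBalancer IP of monitoring-service)
  PMM_IP=$(kubectl get service/monitoring-service -o json | jq -r '.status.loadBalancer.ingress[].ip')
  # Ask Grafana for an Admin API key using the default admin:admin credentials
  API_KEY=$(curl --insecure -X POST -H 'Content-Type: application/json' \
      -d '{"name":"operator", "role": "Admin"}' \
      "https://admin:admin@${PMM_IP}/graph/api/auth/keys" | jq -r .key)
  # Hand the key to the operator by patching it into the cluster secret;
  # the operator then rolls the pxc and haproxy StatefulSets to generation 2
  kubectl patch secret my-cluster-secrets --type merge \
      --patch "{\"stringData\": {\"pmmserverkey\": \"${API_KEY}\"}}"
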
+ cat /tmp/tmp.A9emnscLqu secret/my-cluster-secrets patched + cat /tmp/tmp.upXKglmRMp + rm /tmp/tmp.A9emnscLqu /tmp/tmp.upXKglmRMp + return 0 + wait_for_generation sts/monitoring-pxc 2 + local resource=sts/monitoring-pxc + local target_generation=2 + echo 'Waiting for sts/monitoring-pxc to reach generation 2...' Waiting for sts/monitoring-pxc to reach generation 2... + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 2 ']' + echo 'Resource sts/monitoring-pxc has reached generation 2.' Resource sts/monitoring-pxc has reached generation 2. + break + wait_for_generation sts/monitoring-haproxy 2 + local resource=sts/monitoring-haproxy + local target_generation=2 + echo 'Waiting for sts/monitoring-haproxy to reach generation 2...' Waiting for sts/monitoring-haproxy to reach generation 2... + true ++ kubectl get sts/monitoring-haproxy -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 2 ']' + echo 'Resource sts/monitoring-haproxy has reached generation 2.' Resource sts/monitoring-haproxy has reached generation 2. + break + sleep 10 + kubectl wait pod -l app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --for=condition=ready --timeout=600s pod/monitoring-haproxy-0 condition met pod/monitoring-haproxy-1 condition met pod/monitoring-pxc-0 condition met pod/monitoring-pxc-1 condition met pod/monitoring-pxc-2 condition met + wait_cluster_consistency monitoring 3 2 + local cluster_name=monitoring + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=36 + sleep 7 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.u2GyAmnmsA +++ mktemp ++ local LAST_ERR=/tmp/tmp.EGkmHWXE3n ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.u2GyAmnmsA ++ cat /tmp/tmp.EGkmHWXE3n ++ rm /tmp/tmp.u2GyAmnmsA /tmp/tmp.EGkmHWXE3n ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 0 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PikdSsH3xP +++ mktemp ++ local LAST_ERR=/tmp/tmp.9yk62P25do ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.PikdSsH3xP ++ cat /tmp/tmp.9yk62P25do ++ rm /tmp/tmp.PikdSsH3xP /tmp/tmp.9yk62P25do ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 1 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0iIctEq5qS +++ mktemp ++ local LAST_ERR=/tmp/tmp.At89FlyHbq ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.0iIctEq5qS ++ cat /tmp/tmp.At89FlyHbq ++ rm /tmp/tmp.0iIctEq5qS /tmp/tmp.At89FlyHbq ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 
'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 2 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.EHyONOTf9M +++ mktemp ++ local LAST_ERR=/tmp/tmp.pIlza9ezvc ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.EHyONOTf9M ++ cat /tmp/tmp.pIlza9ezvc ++ rm /tmp/tmp.EHyONOTf9M /tmp/tmp.pIlza9ezvc ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 3 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.1ll4tLn59j +++ mktemp ++ local LAST_ERR=/tmp/tmp.bCtkcFJXvs ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.1ll4tLn59j ++ cat /tmp/tmp.bCtkcFJXvs ++ rm /tmp/tmp.1ll4tLn59j /tmp/tmp.bCtkcFJXvs ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 4 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LoNGLWQ1vc +++ mktemp ++ local LAST_ERR=/tmp/tmp.XHwoteSrwz ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.LoNGLWQ1vc ++ cat /tmp/tmp.XHwoteSrwz ++ rm /tmp/tmp.LoNGLWQ1vc /tmp/tmp.XHwoteSrwz ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6s4aGU7NAl +++ mktemp ++ local LAST_ERR=/tmp/tmp.HMyc3zo6z8 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.6s4aGU7NAl ++ cat /tmp/tmp.HMyc3zo6z8 ++ rm /tmp/tmp.6s4aGU7NAl /tmp/tmp.HMyc3zo6z8 ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine monitoring +++ local cluster_name=monitoring ++++ get_proxy monitoring ++++ local target_cluster=monitoring +++++ kubectl_bin get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.m13rCSfMEo ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.2m95XRoBxS +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.m13rCSfMEo +++++ cat /tmp/tmp.2m95XRoBxS +++++ rm /tmp/tmp.m13rCSfMEo /tmp/tmp.2m95XRoBxS +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo monitoring-haproxy ++++ return +++ local cluster_proxy=monitoring-haproxy +++ echo haproxy ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gwFv3nvvX4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.YNsTgFRXGq ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.gwFv3nvvX4 ++ cat /tmp/tmp.YNsTgFRXGq ++ rm /tmp/tmp.gwFv3nvvX4 /tmp/tmp.YNsTgFRXGq ++ return 0 + [[ 2 == 
\2 ]] + compare_kubectl statefulset/monitoring-pxc -no-prefix + local resource=statefulset/monitoring-pxc + local postfix=-no-prefix + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix.yml + local new_result=/tmp/tmp.Ku8J43Hwq1/statefulset_monitoring-pxc.yml + desc 'compare statefulset/monitoring-pxc--no-prefix' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-pxc--no-prefix ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-80.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.27' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.24' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k121.yml ']' + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-eks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-pxc ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. 
| select(tag == "!!str")) |= sub("monitoring-2-0-6342", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.yIeQYLDCxX ++ mktemp + local LAST_ERR=/tmp/tmp.wJEGYLRWOi + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.yIeQYLDCxX + cat /tmp/tmp.wJEGYLRWOi + rm /tmp/tmp.yIeQYLDCxX /tmp/tmp.wJEGYLRWOi + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix.yml /tmp/tmp.Ku8J43Hwq1/statefulset_monitoring-pxc.yml + compare_kubectl statefulset/monitoring-haproxy -no-prefix + local resource=statefulset/monitoring-haproxy + local postfix=-no-prefix + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix.yml + local new_result=/tmp/tmp.Ku8J43Hwq1/statefulset_monitoring-haproxy.yml + desc 'compare statefulset/monitoring-haproxy--no-prefix' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-haproxy--no-prefix ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-80.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.27' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.24' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ 
echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-eks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-haproxy ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. 
| select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-6342", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.1KgvlC7781 ++ mktemp + local LAST_ERR=/tmp/tmp.mnTDOUixn9 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.1KgvlC7781 + cat /tmp/tmp.mnTDOUixn9 + rm /tmp/tmp.1KgvlC7781 /tmp/tmp.mnTDOUixn9 + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix.yml /tmp/tmp.Ku8J43Hwq1/statefulset_monitoring-haproxy.yml + desc 'apply my-env-var-secrets to add PMM_PREFIX' + set +o xtrace ----------------------------------------------------------------------------------- apply my-env-var-secrets to add PMM_PREFIX ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/conf/envsecrets.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.sxstkwc8BI ++ mktemp + local LAST_ERR=/tmp/tmp.A7Gi3sGgAD + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/conf/envsecrets.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.sxstkwc8BI secret/my-env-var-secrets created + cat /tmp/tmp.A7Gi3sGgAD + rm /tmp/tmp.sxstkwc8BI /tmp/tmp.A7Gi3sGgAD + return 0 + desc 'add new PMM API key to secret' + set +o xtrace ----------------------------------------------------------------------------------- add new PMM API key to secret ----------------------------------------------------------------------------------- ++ jq .key +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.FBPlCT7jAJ +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.lc5ZOvSLGZ ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.FBPlCT7jAJ ++++ cat /tmp/tmp.lc5ZOvSLGZ ++++ rm /tmp/tmp.FBPlCT7jAJ /tmp/tmp.lc5ZOvSLGZ ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin 
get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.tcCvowZLQF +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.AhhdQjMlcQ ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.tcCvowZLQF ++++ cat /tmp/tmp.AhhdQjMlcQ ++++ rm /tmp/tmp.tcCvowZLQF /tmp/tmp.AhhdQjMlcQ ++++ return 0 +++ local ip=35.225.25.173 +++ '[' -n 35.225.25.173 -a 35.225.25.173 '!=' null ']' +++ echo 35.225.25.173 +++ return ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator-new", "role": "Admin"}' https://admin:admin@35.225.25.173/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 167 100 127 100 40 267 84 --:--:-- --:--:-- --:--:-- 352 + API_KEY_NEW='"eyJrIjoiVHc4cWhBOVdOc2ZkSE5GU2FjRk04WjlmM2c5QWh3Q1YiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9"' + kubectl_bin patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoiVHc4cWhBOVdOc2ZkSE5GU2FjRk04WjlmM2c5QWh3Q1YiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9"}}' ++ mktemp + local LAST_OUT=/tmp/tmp.bHJmVZZAGF ++ mktemp + local LAST_ERR=/tmp/tmp.BJQobQY4aa + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoiVHc4cWhBOVdOc2ZkSE5GU2FjRk04WjlmM2c5QWh3Q1YiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9"}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.bHJmVZZAGF secret/my-cluster-secrets patched + cat /tmp/tmp.BJQobQY4aa + rm /tmp/tmp.bHJmVZZAGF /tmp/tmp.BJQobQY4aa + return 0 + desc 'delete old PMM key' + set +o xtrace ----------------------------------------------------------------------------------- delete old PMM key ----------------------------------------------------------------------------------- +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++ jq '.[] | select( .name == "operator").id' ++++ sed -e 's/^"//; s/"$//;' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].hostname' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.JMRRAzD937 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.hOgqOCucYr ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.JMRRAzD937 ++++ cat /tmp/tmp.hOgqOCucYr ++++ rm /tmp/tmp.JMRRAzD937 /tmp/tmp.hOgqOCucYr ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ jq '.status.loadBalancer.ingress[].ip' ++++ local LAST_OUT=/tmp/tmp.NxyY42UalC +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.yXLw0ozkAB ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.NxyY42UalC ++++ cat /tmp/tmp.yXLw0ozkAB ++++ rm /tmp/tmp.NxyY42UalC /tmp/tmp.yXLw0ozkAB ++++ return 0 +++ local ip=35.225.25.173 +++ '[' -n 35.225.25.173 -a 35.225.25.173 '!=' null ']' +++ echo 35.225.25.173 +++ return ++ curl --insecure -X GET 
https://admin:admin@35.225.25.173/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 597 100 597 0 0 1331 0 --:--:-- --:--:-- --:--:-- 1329 + ID_API_KEY_OLD=6 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.mamMv1b9YQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NW6q54HbBe +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.mamMv1b9YQ +++ cat /tmp/tmp.NW6q54HbBe +++ rm /tmp/tmp.mamMv1b9YQ /tmp/tmp.NW6q54HbBe +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.n5xpaL5v0V ++++ mktemp +++ local LAST_ERR=/tmp/tmp.XozbKTDQBs +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.n5xpaL5v0V +++ cat /tmp/tmp.XozbKTDQBs +++ rm /tmp/tmp.n5xpaL5v0V /tmp/tmp.XozbKTDQBs +++ return 0 ++ local ip=35.225.25.173 ++ '[' -n 35.225.25.173 -a 35.225.25.173 '!=' null ']' ++ echo 35.225.25.173 ++ return + curl --insecure -X DELETE https://admin:admin@35.225.25.173/graph/api/auth/keys/6 % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 29 100 29 0 0 65 0 --:--:-- --:--:-- --:--:-- 65 {"message":"API key deleted"}+ wait_for_generation sts/monitoring-pxc 3 + local resource=sts/monitoring-pxc + local target_generation=3 + echo 'Waiting for sts/monitoring-pxc to reach generation 3...' Waiting for sts/monitoring-pxc to reach generation 3... + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 3 ']' + echo 'Resource sts/monitoring-pxc has reached generation 3.' Resource sts/monitoring-pxc has reached generation 3. + break + wait_for_generation sts/monitoring-haproxy 3 + local resource=sts/monitoring-haproxy + local target_generation=3 + echo 'Waiting for sts/monitoring-haproxy to reach generation 3...' Waiting for sts/monitoring-haproxy to reach generation 3... + true ++ kubectl get sts/monitoring-haproxy -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 3 ']' + echo 'Resource sts/monitoring-haproxy has reached generation 3.' Resource sts/monitoring-haproxy has reached generation 3. 
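
The step traced above rotates the PMM server API key and waits for the operator to roll the pods: a new Admin key named operator-new is created through the Grafana auth API, written into the pmmserverkey field of my-cluster-secrets, the old "operator" key is looked up by name and deleted by id, and both statefulsets are polled until metadata.generation reaches 3. A condensed sketch of that flow, assuming the endpoint and admin credentials visible in the trace (35.225.25.173, admin:admin):

    ENDPOINT=35.225.25.173    # LoadBalancer IP of monitoring-service in this run

    # 1. Create a new Admin API key and store it where the operator looks for it.
    NEW_KEY=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
        -d '{"name":"operator-new", "role": "Admin"}' \
        "https://admin:admin@${ENDPOINT}/graph/api/auth/keys" | jq -r '.key')
    kubectl patch secret my-cluster-secrets --type merge \
        --patch "{\"stringData\": {\"pmmserverkey\": \"${NEW_KEY}\"}}"

    # 2. Delete the previous key (named "operator"), found by id first.
    OLD_ID=$(curl --insecure -s -X GET "https://admin:admin@${ENDPOINT}/graph/api/auth/keys" \
        | jq '.[] | select(.name == "operator").id')
    curl --insecure -s -X DELETE "https://admin:admin@${ENDPOINT}/graph/api/auth/keys/${OLD_ID}"

    # 3. Wait until the operator has pushed a new pod template (generation bump).
    wait_for_generation() {
        local resource=$1 target=$2
        until [ "$(kubectl get "$resource" -o jsonpath='{.metadata.generation}')" -eq "$target" ]; do
            sleep 5    # polling interval is an assumption; the trace only shows the final check
        done
        echo "Resource ${resource} has reached generation ${target}."
    }
    wait_for_generation sts/monitoring-pxc 3
    wait_for_generation sts/monitoring-haproxy 3
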
+ break + sleep 10 + kubectl wait pod -l app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --for=condition=ready --timeout=600s pod/monitoring-haproxy-0 condition met pod/monitoring-haproxy-1 condition met pod/monitoring-pxc-0 condition met pod/monitoring-pxc-1 condition met pod/monitoring-pxc-2 condition met + desc 'check if pmm-client container enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-pxc + local resource=statefulset/monitoring-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc.yml + local new_result=/tmp/tmp.Ku8J43Hwq1/statefulset_monitoring-pxc.yml + desc 'compare statefulset/monitoring-pxc-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-pxc- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-80.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.26 >= 1.29' + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.27' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.24' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace 
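
The comparison step traced here relies on two helpers: version_gt, which pipes an expression like '1.26 >= 1.24' through bc -l to decide whether a per-Kubernetes-version expectation file applies, and compare_kubectl, which dumps the live object, strips volatile fields with the long yq filter and diffs the result against the checked-in YAML. A condensed sketch of both; KUBE_VERSION is an assumed variable, and only a few of the real filter's del(...) rules are kept:

    # version_gt: true when the detected Kubernetes minor version is >= the argument.
    # KUBE_VERSION is assumed to hold "major.minor" (1.26 in this run).
    version_gt() {
        [ "$(echo "${KUBE_VERSION} >= $1" | bc -l)" -eq 1 ]
    }

    # compare_kubectl (condensed): normalize a live object and diff it against the
    # expectation file. The trace's real yq filter removes many more volatile fields.
    compare_kubectl() {
        local resource=$1 expected=$2
        local normalized
        normalized=$(mktemp)
        kubectl get -o yaml "$resource" |
            yq eval '
                del(.metadata.managedFields) |
                del(.metadata.resourceVersion) |
                del(.. | select(has("uid")).uid) |
                del(.. | select(has("image")).image) |
                del(.status)' - >"$normalized"
        diff -u "$expected" "$normalized"    # any difference fails the test
    }
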
----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-eks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-pxc ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. 
| select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-6342", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.XfNbTSG3Ia ++ mktemp + local LAST_ERR=/tmp/tmp.k5GUi1FGEt + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.XfNbTSG3Ia + cat /tmp/tmp.k5GUi1FGEt + rm /tmp/tmp.XfNbTSG3Ia /tmp/tmp.k5GUi1FGEt + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc.yml /tmp/tmp.Ku8J43Hwq1/statefulset_monitoring-pxc.yml + compare_kubectl statefulset/monitoring-haproxy + local resource=statefulset/monitoring-haproxy + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy.yml + local new_result=/tmp/tmp.Ku8J43Hwq1/statefulset_monitoring-haproxy.yml + desc 'compare statefulset/monitoring-haproxy-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-haproxy- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-80.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.27' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.24' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-eks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-haproxy ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. 
| select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-6342", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.hRhuLvIs0i ++ mktemp + local LAST_ERR=/tmp/tmp.gEy7HQKruu + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.hRhuLvIs0i + cat /tmp/tmp.gEy7HQKruu + rm /tmp/tmp.hRhuLvIs0i /tmp/tmp.gEy7HQKruu + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy.yml /tmp/tmp.Ku8J43Hwq1/statefulset_monitoring-haproxy.yml + desc 'verify clients agents statuses' + set +o xtrace ----------------------------------------------------------------------------------- verify clients agents statuses ----------------------------------------------------------------------------------- + sleep 300 ++ getSecretData my-cluster-secrets pmmserverkey ++ local secretName=my-cluster-secrets ++ local dataKey=pmmserverkey ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.pmmserverkey}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.WupZbhaUwi +++ mktemp ++ local LAST_ERR=/tmp/tmp.tWkxmMIaYY ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.pmmserverkey}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.WupZbhaUwi ++ cat /tmp/tmp.tWkxmMIaYY ++ rm /tmp/tmp.WupZbhaUwi /tmp/tmp.tWkxmMIaYY ++ return 0 + API_KEY=eyJrIjoiVHc4cWhBOVdOc2ZkSE5GU2FjRk04WjlmM2c5QWh3Q1YiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.x9DwiJOtLt +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.KtgnYncXiw ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ 
break ++++ cat /tmp/tmp.x9DwiJOtLt ++++ cat /tmp/tmp.KtgnYncXiw ++++ rm /tmp/tmp.x9DwiJOtLt /tmp/tmp.KtgnYncXiw ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ sed -e 's/^"//; s/"$//;' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.0cIIexdYFa +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.BOpKlJ5YcR ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.0cIIexdYFa ++++ cat /tmp/tmp.BOpKlJ5YcR ++++ rm /tmp/tmp.0cIIexdYFa /tmp/tmp.BOpKlJ5YcR ++++ return 0 +++ local ip=35.225.25.173 +++ '[' -n 35.225.25.173 -a 35.225.25.173 '!=' null ']' +++ echo 35.225.25.173 +++ return ++ get_mgmnt_service_list eyJrIjoiVHc4cWhBOVdOc2ZkSE5GU2FjRk04WjlmM2c5QWh3Q1YiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 35.225.25.173 monitoring-2-0-6342 ++ local api_key=eyJrIjoiVHc4cWhBOVdOc2ZkSE5GU2FjRk04WjlmM2c5QWh3Q1YiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 ++ local endpoint=35.225.25.173 ++ local namespace=monitoring-2-0-6342 ++ jq 'walk(if type=="object" then with_entries(select(.key | test("service_id|node_id|agent_id|created_at|updated_at") | not)) else . end)' ++ curl -s -k -H 'Authorization: Bearer eyJrIjoiVHc4cWhBOVdOc2ZkSE5GU2FjRk04WjlmM2c5QWh3Q1YiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9' -X POST https://35.225.25.173/v1/management/Service/List ++ jq 'walk(if type == "array" then sort_by(.agent_type) else . end)' ++ /usr/bin/sed -i s/monitoring-2-0-6342-//g /tmp/tmp.Ku8J43Hwq1/active_pmm_agents.json ++ cat /tmp/tmp.Ku8J43Hwq1/active_pmm_agents.json ++ jq '.services | sort_by(.node_name)' ++ echo /tmp/tmp.Ku8J43Hwq1/active_pmm_agents_sorted.json + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1712/e2e-tests/monitoring-2-0/compare/agents-list.json /tmp/tmp.Ku8J43Hwq1/active_pmm_agents_sorted.json + desc 'check mysql metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mysql metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds pxc-prefix-monitoring-2-0-6342-monitoring-pxc-0 admin:admin + local metric=node_boot_time_seconds + local instance=pxc-prefix-monitoring-2-0-6342-monitoring-pxc-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1715826474 ++ /usr/bin/date -u +%s + local end=1715826534 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.4pp3vMMVbA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.V6f7NHV3BX +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.4pp3vMMVbA +++ cat /tmp/tmp.V6f7NHV3BX +++ rm /tmp/tmp.4pp3vMMVbA /tmp/tmp.V6f7NHV3BX +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.lTzWmV1YiV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.VqkDIeHBIB +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get 
service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.lTzWmV1YiV +++ cat /tmp/tmp.VqkDIeHBIB +++ rm /tmp/tmp.lTzWmV1YiV /tmp/tmp.VqkDIeHBIB +++ return 0 ++ local ip=35.225.25.173 ++ '[' -n 35.225.25.173 -a 35.225.25.173 '!=' null ']' ++ echo 35.225.25.173 ++ return + local endpoint=35.225.25.173 ++ curl -s -k 'https://admin:admin@35.225.25.173/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-6342-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-6342-monitoring-pxc-0%22%7D%29&start=1715826474&end=1715826534&step=60' ++ jq '.data.result[0]' + local 'result={ "metric": {}, "values": [ [ 1715826474, "1715823695" ], [ 1715826534, "1715823695" ] ] }' + '[' '{ "metric": {}, "values": [ [ 1715826474, "1715823695" ], [ 1715826534, "1715823695" ] ] }' = null ']' + echo -n '{ "metric": {}, "values": [ [ 1715826474, "1715823695" ], [ 1715826534, "1715823695" ] ] }' + jq '.values[][1]' + grep '^"[0-9]' "1715823695" "1715823695" + get_metric_values mysql_global_status_uptime pxc-prefix-monitoring-2-0-6342-monitoring-pxc-0 admin:admin + local metric=mysql_global_status_uptime + local instance=pxc-prefix-monitoring-2-0-6342-monitoring-pxc-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1715826477 ++ /usr/bin/date -u +%s + local end=1715826537 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TyQLgnW3Y6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ho8VMCpCr0 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.TyQLgnW3Y6 +++ cat /tmp/tmp.ho8VMCpCr0 +++ rm /tmp/tmp.TyQLgnW3Y6 /tmp/tmp.ho8VMCpCr0 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5AqzvnERrS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.0cPmHa3UM0 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.5AqzvnERrS +++ cat /tmp/tmp.0cPmHa3UM0 +++ rm /tmp/tmp.5AqzvnERrS /tmp/tmp.0cPmHa3UM0 +++ return 0 ++ local ip=35.225.25.173 ++ '[' -n 35.225.25.173 -a 35.225.25.173 '!=' null ']' ++ echo 35.225.25.173 ++ return + local endpoint=35.225.25.173 ++ jq '.data.result[0]' ++ curl -s -k 'https://admin:admin@35.225.25.173/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-6342-monitoring-pxc-0%22%7d%20or%20mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-6342-monitoring-pxc-0%22%7D%29&start=1715826477&end=1715826537&step=60' + local 'result={ "metric": {}, "values": [ [ 1715826477, "171" ], [ 1715826537, "231" ] ] }' + '[' '{ "metric": {}, "values": [ [ 1715826477, "171" ], [ 1715826537, "231" ] ] }' = null ']' + echo -n '{ "metric": {}, "values": [ [ 1715826477, "171" ], [ 1715826537, "231" ] ] }' + jq '.values[][1]' + grep '^"[0-9]' "171" "231" + desc 'check 
haproxy metrics' + set +o xtrace ----------------------------------------------------------------------------------- check haproxy metrics ----------------------------------------------------------------------------------- + get_metric_values haproxy_backend_status pxc-prefix-monitoring-2-0-6342-monitoring-haproxy-0 admin:admin + local metric=haproxy_backend_status + local instance=pxc-prefix-monitoring-2-0-6342-monitoring-haproxy-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1715826479 ++ /usr/bin/date -u +%s + local end=1715826539 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.aOinqDkXUy ++++ mktemp +++ local LAST_ERR=/tmp/tmp.aDONSih9l2 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.aOinqDkXUy +++ cat /tmp/tmp.aDONSih9l2 +++ rm /tmp/tmp.aOinqDkXUy /tmp/tmp.aDONSih9l2 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GStHlQxIkG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.IpzyEgk7CG +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.GStHlQxIkG +++ cat /tmp/tmp.IpzyEgk7CG +++ rm /tmp/tmp.GStHlQxIkG /tmp/tmp.IpzyEgk7CG +++ return 0 ++ local ip=35.225.25.173 ++ '[' -n 35.225.25.173 -a 35.225.25.173 '!=' null ']' ++ echo 35.225.25.173 ++ return + local endpoint=35.225.25.173 ++ curl -s -k 'https://admin:admin@35.225.25.173/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28haproxy_backend_status%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-6342-monitoring-haproxy-0%22%7d%20or%20haproxy_backend_status%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-6342-monitoring-haproxy-0%22%7D%29&start=1715826479&end=1715826539&step=60' ++ jq '.data.result[0]' + local 'result={ "metric": {}, "values": [ [ 1715826479, "0" ], [ 1715826539, "0" ] ] }' + '[' '{ "metric": {}, "values": [ [ 1715826479, "0" ], [ 1715826539, "0" ] ] }' = null ']' + echo -n '{ "metric": {}, "values": [ [ 1715826479, "0" ], [ 1715826539, "0" ] ] }' + jq '.values[][1]' + grep '^"[0-9]' "0" "0" + get_metric_values haproxy_backend_active_servers pxc-prefix-monitoring-2-0-6342-monitoring-haproxy-0 admin:admin + local metric=haproxy_backend_active_servers + local instance=pxc-prefix-monitoring-2-0-6342-monitoring-haproxy-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1715826481 ++ /usr/bin/date -u +%s + local end=1715826541 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GbOauD6fI5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.14aFivN184 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.GbOauD6fI5 +++ cat /tmp/tmp.14aFivN184 
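
get_metric_values, traced here for the node, MySQL and HAProxy series, queries Prometheus through the PMM/Grafana datasource proxy: it takes a one-minute window ending now, asks query_range for min(<metric>{node_name=~"<instance>"}), and asserts that numeric samples came back. A simplified sketch that builds the query with curl's --data-urlencode instead of the pre-encoded URL seen in the trace:

    # get_metric_values (sketch): fetch the last minute of one metric for one instance
    # from PMM's Grafana datasource proxy and print the sample values.
    get_metric_values() {
        local metric=$1 instance=$2 user_pass=$3 endpoint=$4
        local start end
        start=$(date -u +%s -d '-1 minute')
        end=$(date -u +%s)
        curl -s -k -G \
            --data-urlencode "query=min(${metric}{node_name=~\"${instance}\"})" \
            --data-urlencode "start=${start}" \
            --data-urlencode "end=${end}" \
            --data-urlencode "step=60" \
            "https://${user_pass}@${endpoint}/graph/api/datasources/proxy/1/api/v1/query_range" \
            | jq '.data.result[0].values[]?[1]' \
            | grep '^"[0-9]'    # grep exits non-zero, i.e. fails, if no numeric samples returned
    }

    # example matching the trace (endpoint is the monitoring-service LoadBalancer IP):
    get_metric_values haproxy_backend_active_servers \
        pxc-prefix-monitoring-2-0-6342-monitoring-haproxy-0 admin:admin 35.225.25.173
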
+++ rm /tmp/tmp.GbOauD6fI5 /tmp/tmp.14aFivN184 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.RFbMA6ca1p ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xQQqyHVP03 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.RFbMA6ca1p +++ cat /tmp/tmp.xQQqyHVP03 +++ rm /tmp/tmp.RFbMA6ca1p /tmp/tmp.xQQqyHVP03 +++ return 0 ++ local ip=35.225.25.173 ++ '[' -n 35.225.25.173 -a 35.225.25.173 '!=' null ']' ++ echo 35.225.25.173 ++ return + local endpoint=35.225.25.173 ++ jq '.data.result[0]' ++ curl -s -k 'https://admin:admin@35.225.25.173/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28haproxy_backend_active_servers%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-6342-monitoring-haproxy-0%22%7d%20or%20haproxy_backend_active_servers%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-6342-monitoring-haproxy-0%22%7D%29&start=1715826481&end=1715826541&step=60' + local 'result={ "metric": {}, "values": [ [ 1715826481, "1" ], [ 1715826541, "1" ] ] }' + '[' '{ "metric": {}, "values": [ [ 1715826481, "1" ], [ 1715826541, "1" ] ] }' = null ']' + jq '.values[][1]' + echo -n '{ "metric": {}, "values": [ [ 1715826481, "1" ], [ 1715826541, "1" ] ] }' + grep '^"[0-9]' "1" "1" + desc 'check QAN data' + set +o xtrace ----------------------------------------------------------------------------------- check QAN data ----------------------------------------------------------------------------------- + get_qan20_values monitoring-pxc-0 admin:admin + local instance=monitoring-pxc-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S -d '-30 minute' + local start=2024-05-16T01:59:03 ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S + local end=2024-05-16T02:29:03 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.STDvSOBfJ6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2Mrx1gNVaA +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.STDvSOBfJ6 +++ cat /tmp/tmp.2Mrx1gNVaA +++ rm /tmp/tmp.STDvSOBfJ6 /tmp/tmp.2Mrx1gNVaA +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.CreKE3mkex ++++ mktemp +++ local LAST_ERR=/tmp/tmp.G07I5zB5IJ +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.CreKE3mkex +++ cat /tmp/tmp.G07I5zB5IJ +++ rm /tmp/tmp.CreKE3mkex /tmp/tmp.G07I5zB5IJ +++ return 0 ++ local ip=35.225.25.173 ++ '[' -n 35.225.25.173 -a 35.225.25.173 '!=' null ']' ++ echo 35.225.25.173 ++ return + local endpoint=35.225.25.173 + cat ++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z + jq '.rows[].fingerprint' + curl -s -k -XPOST -d @payload.json 
https://admin:admin@35.225.25.173/v0/qan/GetReport null + rm -f payload.json + nodeList=($(get_node_id_from_pmm)) ++ get_node_id_from_pmm ++ nodeList=() ++ local -a nodeList +++ kubectl_bin get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns=NAME:.metadata.name ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qbkViiR0jU ++++ mktemp +++ local LAST_ERR=/tmp/tmp.envazGV7qZ +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns=NAME:.metadata.name +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.qbkViiR0jU +++ cat /tmp/tmp.envazGV7qZ +++ rm /tmp/tmp.qbkViiR0jU /tmp/tmp.envazGV7qZ +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-6342 monitoring-pxc-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.A871luj9V5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PnNpNBhZPG +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-6342 monitoring-pxc-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.A871luj9V5 +++ cat /tmp/tmp.PnNpNBhZPG +++ rm /tmp/tmp.A871luj9V5 /tmp/tmp.PnNpNBhZPG +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-6342 monitoring-pxc-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.YVHh8wewOk ++++ mktemp +++ local LAST_ERR=/tmp/tmp.SQN3s5TKqF +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-6342 monitoring-pxc-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.YVHh8wewOk +++ cat /tmp/tmp.SQN3s5TKqF +++ rm /tmp/tmp.YVHh8wewOk /tmp/tmp.SQN3s5TKqF +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-6342 monitoring-pxc-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Qowo4Q0Web ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1sYBeNTagn +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-6342 monitoring-pxc-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.Qowo4Q0Web +++ cat /tmp/tmp.1sYBeNTagn +++ rm /tmp/tmp.Qowo4Q0Web /tmp/tmp.1sYBeNTagn +++ return 0 ++ echo /node_id/fb51cdfa-5b3a-446b-8de8-88b246a649c1 /node_id/9c0873c3-53b8-4278-8154-2262cef57893 /node_id/e551cc13-5cd5-4440-8a13-5adee270ca6a + 
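
The loop traced above collects each PXC pod's PMM node id by exec'ing pmm-admin inside the pmm-client sidecar; those ids are then checked against the PMM server's inventory in the next step. A sketch of the collection helper, with the namespace value taken from this run:

    # Collect the PMM node_id registered by each PXC pod's pmm-client sidecar.
    get_node_id_from_pmm() {
        local namespace=$1
        local -a nodeList=()
        for pod in $(kubectl get pods -n "$namespace" --no-headers \
                -l app.kubernetes.io/component=pxc \
                --output=custom-columns=NAME:.metadata.name); do
            nodeList+=("$(kubectl exec -n "$namespace" "$pod" -c pmm-client -- \
                pmm-admin status --json | jq -r '.pmm_agent_status.node_id')")
        done
        echo "${nodeList[@]}"
    }

    get_node_id_from_pmm monitoring-2-0-6342    # prints one /node_id/... per PXC pod
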
nodeList_from_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/fb51cdfa-5b3a-446b-8de8-88b246a649c1 /node_id/9c0873c3-53b8-4278-8154-2262cef57893 /node_id/e551cc13-5cd5-4440-8a13-5adee270ca6a ++ nodeList=("$@") ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/fb51cdfa-5b3a-446b-8de8-88b246a649c1 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.JjIAgiu5JN ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.vrEH8pAJMb +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.JjIAgiu5JN +++++ cat /tmp/tmp.vrEH8pAJMb +++++ rm /tmp/tmp.JjIAgiu5JN /tmp/tmp.vrEH8pAJMb +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.hMDmzFCfSE +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.beO3I8JQMb ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.hMDmzFCfSE ++++ cat /tmp/tmp.beO3I8JQMb ++++ rm /tmp/tmp.hMDmzFCfSE /tmp/tmp.beO3I8JQMb ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Bezti69sm0 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.miUTHpEy9f ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.Bezti69sm0 ++++ cat /tmp/tmp.miUTHpEy9f ++++ rm /tmp/tmp.Bezti69sm0 /tmp/tmp.miUTHpEy9f ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-6342 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.225.25.173/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ZGbRTSO601 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vxs7ETOpAa +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-6342 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.225.25.173/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.ZGbRTSO601 +++ cat /tmp/tmp.vxs7ETOpAa +++ rm /tmp/tmp.ZGbRTSO601 /tmp/tmp.vxs7ETOpAa +++ return 0 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin 
--server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/9c0873c3-53b8-4278-8154-2262cef57893 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.YctIyTbdip ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.uu4bzX9MrI +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.YctIyTbdip +++++ cat /tmp/tmp.uu4bzX9MrI +++++ rm /tmp/tmp.YctIyTbdip /tmp/tmp.uu4bzX9MrI +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.TkN6YZOrHv +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.vqNFOeweZ2 ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.TkN6YZOrHv ++++ cat /tmp/tmp.vqNFOeweZ2 ++++ rm /tmp/tmp.TkN6YZOrHv /tmp/tmp.vqNFOeweZ2 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.qBuXlsh9eD +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.HdakWnwe2O ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.qBuXlsh9eD ++++ cat /tmp/tmp.HdakWnwe2O ++++ rm /tmp/tmp.qBuXlsh9eD /tmp/tmp.HdakWnwe2O ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-6342 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.225.25.173/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.cE5cEta5kx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GuMgOK1uhJ +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-6342 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.225.25.173/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.cE5cEta5kx +++ cat /tmp/tmp.GuMgOK1uhJ +++ rm /tmp/tmp.cE5cEta5kx /tmp/tmp.GuMgOK1uhJ +++ return 0 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/e551cc13-5cd5-4440-8a13-5adee270ca6a +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ kubectl_bin get 
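# A rough equivalent of the does_node_id_exists loop being traced: every node_id
# harvested from the pmm-client agents is looked up in the PMM server inventory via
# the monitoring-0 pod, and only IDs still registered are echoed back. Server URL,
# pod name and credentials are taken verbatim from this run; the function name is
# illustrative.
does_node_id_exists_sketch() {
    local found=()
    for node_id in "$@"; do
        found+=($(kubectl exec -n monitoring-2-0-6342 monitoring-0 -- \
            pmm-admin --server-url=https://admin:admin@35.225.25.173/ --server-insecure-tls \
                inventory list nodes --node-type=CONTAINER_NODE \
            | grep "$node_id" | awk '{print $4}'))
    done
    echo "${found[@]}"
}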
service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.mk3tO0LoRN ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.6a4iGruKc3 +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.mk3tO0LoRN +++++ cat /tmp/tmp.6a4iGruKc3 +++++ rm /tmp/tmp.mk3tO0LoRN /tmp/tmp.6a4iGruKc3 +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.soFFgHppOi +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.nLfoTCA5L2 ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.soFFgHppOi ++++ cat /tmp/tmp.nLfoTCA5L2 ++++ rm /tmp/tmp.soFFgHppOi /tmp/tmp.nLfoTCA5L2 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.qQtN3tFN8C +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.1iu7x4kkVu ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.qQtN3tFN8C ++++ cat /tmp/tmp.1iu7x4kkVu ++++ rm /tmp/tmp.qQtN3tFN8C /tmp/tmp.1iu7x4kkVu ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-6342 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.225.25.173/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.6ZsDAvaS0z ++++ mktemp +++ local LAST_ERR=/tmp/tmp.zXiyNkEpwF +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-6342 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.225.25.173/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.6ZsDAvaS0z +++ cat /tmp/tmp.zXiyNkEpwF +++ rm /tmp/tmp.6ZsDAvaS0z /tmp/tmp.zXiyNkEpwF +++ return 0 ++ echo /node_id/fb51cdfa-5b3a-446b-8de8-88b246a649c1 /node_id/9c0873c3-53b8-4278-8154-2262cef57893 /node_id/e551cc13-5cd5-4440-8a13-5adee270ca6a + for node_id in '"${nodeList_from_pmm[@]}"' + '[' -z /node_id/fb51cdfa-5b3a-446b-8de8-88b246a649c1 ']' + for node_id in '"${nodeList_from_pmm[@]}"' + '[' -z /node_id/9c0873c3-53b8-4278-8154-2262cef57893 ']' + for node_id in '"${nodeList_from_pmm[@]}"' + '[' -z /node_id/e551cc13-5cd5-4440-8a13-5adee270ca6a ']' + kubectl_bin patch pxc monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.H34TYPxjEx ++ mktemp + local LAST_ERR=/tmp/tmp.ow9LnDE9br + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.H34TYPxjEx perconaxtradbcluster.pxc.percona.com/monitoring patched + cat /tmp/tmp.ow9LnDE9br + rm /tmp/tmp.H34TYPxjEx /tmp/tmp.ow9LnDE9br + 
return 0 + wait_for_delete pod/monitoring-pxc-0 + local res=pod/monitoring-pxc-0 + echo -n 'pod/monitoring-pxc-0 - ' pod/monitoring-pxc-0 - + set +o xtrace ....................Error from server (NotFound): pods "monitoring-pxc-0" not found + does_node_id_exists_in_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/fb51cdfa-5b3a-446b-8de8-88b246a649c1 /node_id/9c0873c3-53b8-4278-8154-2262cef57893 /node_id/e551cc13-5cd5-4440-8a13-5adee270ca6a ++ nodeList=("$@") ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/fb51cdfa-5b3a-446b-8de8-88b246a649c1 ++++ get_service_ip monitoring-service ++++ local service=monitoring-service +++ awk '{print $4}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.qF9E3OCc7B ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.pyulb8NJie +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.qF9E3OCc7B +++++ cat /tmp/tmp.pyulb8NJie +++++ rm /tmp/tmp.qF9E3OCc7B /tmp/tmp.pyulb8NJie +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.k6WTNOyrle +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.KHHqxLxcdW ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.k6WTNOyrle ++++ cat /tmp/tmp.KHHqxLxcdW ++++ rm /tmp/tmp.k6WTNOyrle /tmp/tmp.KHHqxLxcdW ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.VcSBZKA2uN +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.IHUnH81g2M ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.VcSBZKA2uN ++++ cat /tmp/tmp.IHUnH81g2M ++++ rm /tmp/tmp.VcSBZKA2uN /tmp/tmp.IHUnH81g2M ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-6342 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.225.25.173/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.MgnnKtsOIj ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QJky8RQJaC +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-6342 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.225.25.173/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ 
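# The cluster is paused with the JSON patch shown above, then wait_for_delete polls
# until monitoring-pxc-0 is gone (the dots in the log are that polling). Below is a
# hedged stand-in for the wait; the real helper's timeout and interval are not
# visible in this trace.
kubectl patch pxc monitoring --type json \
    -p='[{"op":"add","path":"/spec/pause","value":true}]'
wait_for_delete_sketch() {
    local res=$1
    echo -n "$res - "
    until kubectl get "$res" 2>&1 | grep -q NotFound; do
        echo -n .
        sleep 1
    done
    echo
}
wait_for_delete_sketch pod/monitoring-pxc-0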
cat /tmp/tmp.MgnnKtsOIj +++ cat /tmp/tmp.QJky8RQJaC +++ rm /tmp/tmp.MgnnKtsOIj /tmp/tmp.QJky8RQJaC +++ return 0 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/9c0873c3-53b8-4278-8154-2262cef57893 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.DZC7Ay3HB1 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.dXImAROI79 +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.DZC7Ay3HB1 +++++ cat /tmp/tmp.dXImAROI79 +++++ rm /tmp/tmp.DZC7Ay3HB1 /tmp/tmp.dXImAROI79 +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.353WBfCJGk +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Tfgx7G91Uc ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.353WBfCJGk ++++ cat /tmp/tmp.Tfgx7G91Uc ++++ rm /tmp/tmp.353WBfCJGk /tmp/tmp.Tfgx7G91Uc ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.1JNo3jFCxF +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.WXnT1uqUXj ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.1JNo3jFCxF ++++ cat /tmp/tmp.WXnT1uqUXj ++++ rm /tmp/tmp.1JNo3jFCxF /tmp/tmp.WXnT1uqUXj ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-6342 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.225.25.173/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.D6Zao5MBfK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.c00q9RHycY +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-6342 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.225.25.173/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.D6Zao5MBfK +++ cat /tmp/tmp.c00q9RHycY +++ rm /tmp/tmp.D6Zao5MBfK /tmp/tmp.c00q9RHycY +++ return 0 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) ++++ get_service_ip monitoring-service 
++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++ grep /node_id/e551cc13-5cd5-4440-8a13-5adee270ca6a +++ awk '{print $4}' +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.VVpo4LLjaY ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.Pz2RwEwGgx +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.VVpo4LLjaY +++++ cat /tmp/tmp.Pz2RwEwGgx +++++ rm /tmp/tmp.VVpo4LLjaY /tmp/tmp.Pz2RwEwGgx +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.BqV0eupjNf +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.DdqkKevtKp ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.BqV0eupjNf ++++ cat /tmp/tmp.DdqkKevtKp ++++ rm /tmp/tmp.BqV0eupjNf /tmp/tmp.DdqkKevtKp ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.oN3TMVf0pU +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.pMx8WYAm0H ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.oN3TMVf0pU ++++ cat /tmp/tmp.pMx8WYAm0H ++++ rm /tmp/tmp.oN3TMVf0pU /tmp/tmp.pMx8WYAm0H ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-6342 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.225.25.173/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Ppye3HuWf2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.oGYHWlIL7A +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-6342 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.225.25.173/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.Ppye3HuWf2 +++ cat /tmp/tmp.oGYHWlIL7A +++ rm /tmp/tmp.Ppye3HuWf2 /tmp/tmp.oGYHWlIL7A +++ return 0 ++ echo + [[ -n '' ]] + desc 'check for passwords leak' + set +o xtrace ----------------------------------------------------------------------------------- check for passwords leak ----------------------------------------------------------------------------------- + check_passwords_leak + local secrets + local passwords + local pods ++ kubectl_bin get secrets -o json +++ mktemp ++ jq -r '.items[].data | to_entries | .[] | select(.key | (endswith(".crt") or endswith(".key") or endswith(".pub") or endswith(".pem") or endswith(".p12") or endswith(".sql") or contains("release") or contains("namespace") or contains("AWS_ACCESS_KEY_ID") or contains("AZURE_STORAGE_ACCOUNT_NAME")) | not) | .value' ++ local LAST_OUT=/tmp/tmp.EhIBDm5RfB +++ mktemp ++ local LAST_ERR=/tmp/tmp.rWTe45uQ1y ++ local 
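# check_passwords_leak starts by collecting candidate secret values. The jq filter
# below is copied from the trace: certificate, key and dump entries plus a few known
# non-secret keys are excluded, and the remaining base64-encoded values are kept for
# decoding in the next step.
secrets=$(kubectl get secrets -o json | jq -r '.items[].data | to_entries | .[]
    | select(.key | (endswith(".crt") or endswith(".key") or endswith(".pub")
        or endswith(".pem") or endswith(".p12") or endswith(".sql")
        or contains("release") or contains("namespace")
        or contains("AWS_ACCESS_KEY_ID") or contains("AZURE_STORAGE_ACCOUNT_NAME")) | not)
    | .value')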
exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.EhIBDm5RfB ++ cat /tmp/tmp.rWTe45uQ1y ++ rm /tmp/tmp.EhIBDm5RfB /tmp/tmp.rWTe45uQ1y ++ return 0 + secrets='WEo0K0hITFVQMDJJN0daTm14RWhESDJGT0liTW9sZ0pYU1d0cmw2Vw== K1N6aDhFYjBJWjBvMjV0SXZBei9yN1NzZG1EeWN0M09ENUNMMzIzblZvbFpZdlorWGV3ZFkxM2ZYRW9HM2pHc1o3L05uSWovdTl2ZStBU3R5TEtRa2c9PQo= VEtVS0N0dkFZRC9uR1p3dEYxa0hBOFFuTXpoNlFHMFlrUlZJK0ZLSQ== ZjErZUEzNTNvUFcsOTxiST8hNw== b3BlcmF0b3JhZG1pbg== YWRtaW4= ZXlKcklqb2lWSGM0Y1doQk9WZE9jMlprU0U1R1UyRmpSazA0V2psbU0yYzVRV2gzUTFZaUxDSnVJam9pYjNCbGNtRjBiM0l0Ym1WM0lpd2lhV1FpT2pGOQ== YWRtaW5fcGFzc3dvcmQ= cmVwbF9wYXNzd29yZA== cm9vdF9wYXNzd29yZA== YmFja3VwX3Bhc3N3b3Jk c29tZS1zZWNyZXQta2V5 ZjErZUEzNTNvUFcsOTxiST8hNw== b3BlcmF0b3JhZG1pbg== YWRtaW4= ZXlKcklqb2lWSGM0Y1doQk9WZE9jMlprU0U1R1UyRmpSazA0V2psbU0yYzVRV2gzUTFZaUxDSnVJam9pYjNCbGNtRjBiM0l0Ym1WM0lpd2lhV1FpT2pGOQ== YWRtaW5fcGFzc3dvcmQ= cmVwbF9wYXNzd29yZA== cm9vdF9wYXNzd29yZA== YmFja3VwX3Bhc3N3b3Jk cHhjLXByZWZpeC0=' + echo secrets=WEo0K0hITFVQMDJJN0daTm14RWhESDJGT0liTW9sZ0pYU1d0cmw2Vw== K1N6aDhFYjBJWjBvMjV0SXZBei9yN1NzZG1EeWN0M09ENUNMMzIzblZvbFpZdlorWGV3ZFkxM2ZYRW9HM2pHc1o3L05uSWovdTl2ZStBU3R5TEtRa2c9PQo= VEtVS0N0dkFZRC9uR1p3dEYxa0hBOFFuTXpoNlFHMFlrUlZJK0ZLSQ== ZjErZUEzNTNvUFcsOTxiST8hNw== b3BlcmF0b3JhZG1pbg== YWRtaW4= ZXlKcklqb2lWSGM0Y1doQk9WZE9jMlprU0U1R1UyRmpSazA0V2psbU0yYzVRV2gzUTFZaUxDSnVJam9pYjNCbGNtRjBiM0l0Ym1WM0lpd2lhV1FpT2pGOQ== YWRtaW5fcGFzc3dvcmQ= cmVwbF9wYXNzd29yZA== cm9vdF9wYXNzd29yZA== YmFja3VwX3Bhc3N3b3Jk c29tZS1zZWNyZXQta2V5 ZjErZUEzNTNvUFcsOTxiST8hNw== b3BlcmF0b3JhZG1pbg== YWRtaW4= ZXlKcklqb2lWSGM0Y1doQk9WZE9jMlprU0U1R1UyRmpSazA0V2psbU0yYzVRV2gzUTFZaUxDSnVJam9pYjNCbGNtRjBiM0l0Ym1WM0lpd2lhV1FpT2pGOQ== YWRtaW5fcGFzc3dvcmQ= cmVwbF9wYXNzd29yZA== cm9vdF9wYXNzd29yZA== YmFja3VwX3Bhc3N3b3Jk cHhjLXByZWZpeC0= secrets=WEo0K0hITFVQMDJJN0daTm14RWhESDJGT0liTW9sZ0pYU1d0cmw2Vw== K1N6aDhFYjBJWjBvMjV0SXZBei9yN1NzZG1EeWN0M09ENUNMMzIzblZvbFpZdlorWGV3ZFkxM2ZYRW9HM2pHc1o3L05uSWovdTl2ZStBU3R5TEtRa2c9PQo= VEtVS0N0dkFZRC9uR1p3dEYxa0hBOFFuTXpoNlFHMFlrUlZJK0ZLSQ== ZjErZUEzNTNvUFcsOTxiST8hNw== b3BlcmF0b3JhZG1pbg== YWRtaW4= ZXlKcklqb2lWSGM0Y1doQk9WZE9jMlprU0U1R1UyRmpSazA0V2psbU0yYzVRV2gzUTFZaUxDSnVJam9pYjNCbGNtRjBiM0l0Ym1WM0lpd2lhV1FpT2pGOQ== YWRtaW5fcGFzc3dvcmQ= cmVwbF9wYXNzd29yZA== cm9vdF9wYXNzd29yZA== YmFja3VwX3Bhc3N3b3Jk c29tZS1zZWNyZXQta2V5 ZjErZUEzNTNvUFcsOTxiST8hNw== b3BlcmF0b3JhZG1pbg== YWRtaW4= ZXlKcklqb2lWSGM0Y1doQk9WZE9jMlprU0U1R1UyRmpSazA0V2psbU0yYzVRV2gzUTFZaUxDSnVJam9pYjNCbGNtRjBiM0l0Ym1WM0lpd2lhV1FpT2pGOQ== YWRtaW5fcGFzc3dvcmQ= cmVwbF9wYXNzd29yZA== cm9vdF9wYXNzd29yZA== YmFja3VwX3Bhc3N3b3Jk cHhjLXByZWZpeC0= ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in 
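# Each collected value is then base64-decoded; the decoded strings become the
# passwords variable traced below, which the leak check later greps for in pod logs.
# The trace only shows echo and base64 -d per item, so the exact list assembly here
# is an assumption.
passwords=""
for i in $secrets; do
    passwords="$passwords $(echo "$i" | base64 -d)"
done
echo "passwords=$passwords"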
'$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo + passwords='XJ4+HHLUP02I7GZNmxEhDH2FOIbMolgJXSWtrl6W +Szh8Eb0IZ0o25tIvAz/r7SsdmDyct3OD5CL323nVolZYvZ+XewdY13fXEoG3jGsZ7/NnIj/u9ve+AStyLKQkg== TKUKCtvAYD/nGZwtF1kHA8QnMzh6QG0YkRVI+FKI f1+eA353oPW,9