Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/logs/monitoring-2-0-8-0.log WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 WARNING: version difference between client (1.30) and server (1.26) exceeds the supported minor version skew of +/-1 + cluster=monitoring + create_infra monitoring-2-0-6105 + local ns=monitoring-2-0-6105 + '[' -n pxc-operator ']' + kubectl get pxc --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n monitoring-2-0-8295 monitoring --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/monitoring patched (no change) + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.NyxusK9g7c ++ mktemp + local LAST_ERR=/tmp/tmp.ReNPZPf520 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.NyxusK9g7c perconaxtradbcluster.pxc.percona.com "monitoring" deleted + cat /tmp/tmp.ReNPZPf520 + rm /tmp/tmp.NyxusK9g7c /tmp/tmp.ReNPZPf520 + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.BmjT048vMg ++ mktemp + local LAST_ERR=/tmp/tmp.7WNlexcA3c + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.BmjT048vMg No resources found + cat /tmp/tmp.7WNlexcA3c + rm /tmp/tmp.BmjT048vMg /tmp/tmp.7WNlexcA3c + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.5HzBkdLOQu ++ mktemp + local LAST_ERR=/tmp/tmp.ZHlGnaoreV + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.5HzBkdLOQu No resources found + cat /tmp/tmp.ZHlGnaoreV + rm /tmp/tmp.5HzBkdLOQu /tmp/tmp.ZHlGnaoreV + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ grep chaos-mesh ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get clusterrolebinding ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete 
clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator+ awk '{print$1}' ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.vFjkt00rEY ++ mktemp + local LAST_ERR=/tmp/tmp.ao0OQ6d5CJ + local exit_status=0 ++ seq 0 2 + kubectl_bin get ns ++ mktemp + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + local LAST_OUT=/tmp/tmp.x6CcTZL6CU ++ mktemp + local LAST_ERR=/tmp/tmp.5q8YjiveFr + local exit_status=0 + xargs kubectl delete ns ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.x6CcTZL6CU + cat /tmp/tmp.5q8YjiveFr + rm /tmp/tmp.x6CcTZL6CU /tmp/tmp.5q8YjiveFr + return 0 namespace "monitoring-2-0-8295" deleted Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.vFjkt00rEY namespace "pxc-operator" deleted + cat /tmp/tmp.ao0OQ6d5CJ + rm /tmp/tmp.vFjkt00rEY /tmp/tmp.ao0OQ6d5CJ + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'namespace/pxc-operator - ' namespace/pxc-operator - + set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.dL3EyrmoLO ++ mktemp + local LAST_ERR=/tmp/tmp.T0EIWJZGpW + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.dL3EyrmoLO namespace/pxc-operator created + cat /tmp/tmp.T0EIWJZGpW + rm /tmp/tmp.dL3EyrmoLO /tmp/tmp.T0EIWJZGpW + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.jxONgVc8n7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.uChAs3jx39 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.jxONgVc8n7 ++ cat /tmp/tmp.uChAs3jx39 ++ rm /tmp/tmp.jxONgVc8n7 /tmp/tmp.uChAs3jx39 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1740-0a840b68-3-cluster8 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.fL3W4cruGF ++ mktemp + local LAST_ERR=/tmp/tmp.1yKUZeS2hK + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1740-0a840b68-3-cluster8 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.fL3W4cruGF Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-1740-0a840b68-3-cluster8" 
modified. + cat /tmp/tmp.1yKUZeS2hK + rm /tmp/tmp.fL3W4cruGF /tmp/tmp.1yKUZeS2hK + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.uuP5k225kF ++ mktemp + local LAST_ERR=/tmp/tmp.xYHCncTI1y + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.uuP5k225kF customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.xYHCncTI1y + rm /tmp/tmp.uuP5k225kF /tmp/tmp.xYHCncTI1y + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.IlRT2vH0mn ++ mktemp + local LAST_ERR=/tmp/tmp.Vt9hwCKIqw + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.IlRT2vH0mn clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.Vt9hwCKIqw + rm /tmp/tmp.IlRT2vH0mn /tmp/tmp.Vt9hwCKIqw + return 0 + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/deploy/cw-operator.yaml + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-1740-0a840b68^' + kubectl_bin apply -f - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "DEBUG"' - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + sed -e 's^failureThreshold: .*^failureThreshold: 10^' ++ mktemp + local LAST_OUT=/tmp/tmp.wR4APkjSRI ++ mktemp + local LAST_ERR=/tmp/tmp.zZI1tg86pg + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.wR4APkjSRI deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.zZI1tg86pg + rm /tmp/tmp.wR4APkjSRI /tmp/tmp.zZI1tg86pg + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + local LAST_OUT=/tmp/tmp.PX5JgcsaFf ++ mktemp + local LAST_ERR=/tmp/tmp.Z6L2IONRTR + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready 
pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.PX5JgcsaFf pod/percona-xtradb-cluster-operator-6849457d9-7d6tw condition met + cat /tmp/tmp.Z6L2IONRTR + rm /tmp/tmp.PX5JgcsaFf /tmp/tmp.Z6L2IONRTR + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.W1hMRuvSJE +++ mktemp ++ local LAST_ERR=/tmp/tmp.w9PGpTKO2B ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.W1hMRuvSJE ++ cat /tmp/tmp.w9PGpTKO2B ++ rm /tmp/tmp.W1hMRuvSJE /tmp/tmp.w9PGpTKO2B ++ return 0 + wait_pod percona-xtradb-cluster-operator-6849457d9-7d6tw 480 pxc-operator + local pod=percona-xtradb-cluster-operator-6849457d9-7d6tw + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-6849457d9-7d6tw ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-6849457d9-7d6tw condition met percona-xtradb-cluster-operator-6849457d9-7d6tw.Ok + sleep 3 + create_namespace monitoring-2-0-6105 + local namespace=monitoring-2-0-6105 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ kubectl get clusterrole ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + desc 
'cleaned up old namespaces monitoring-2-0-6105' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces monitoring-2-0-6105 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace monitoring-2-0-6105 + awk '{print$1}' ++ mktemp + egrep -v '^kube-|^default$|Terminating|pxc-operator|openshift|^NAME' + xargs kubectl delete ns + local LAST_OUT=/tmp/tmp.2yu7Vs77Qc ++ mktemp + local LAST_OUT=/tmp/tmp.AQhO2AM7ir ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.HLWu8wkTmJ + local exit_status=0 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.Dv1c3gJoQQ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-2-0-6105 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.AQhO2AM7ir + cat /tmp/tmp.Dv1c3gJoQQ + rm /tmp/tmp.AQhO2AM7ir /tmp/tmp.Dv1c3gJoQQ + return 0 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-2-0-6105 Error from server (Forbidden): namespaces "default" is forbidden: this namespace may not be deleted + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-2-0-6105 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.2yu7Vs77Qc + cat /tmp/tmp.HLWu8wkTmJ Error from server (NotFound): namespaces "monitoring-2-0-6105" not found + rm /tmp/tmp.2yu7Vs77Qc /tmp/tmp.HLWu8wkTmJ + return 1 + : + wait_for_delete namespace/monitoring-2-0-6105 + local res=namespace/monitoring-2-0-6105 + echo -n 'namespace/monitoring-2-0-6105 - ' namespace/monitoring-2-0-6105 - + set +o xtrace Error from server (NotFound): namespaces "monitoring-2-0-6105" not found + desc 'create namespace monitoring-2-0-6105' + set +o xtrace ----------------------------------------------------------------------------------- create namespace monitoring-2-0-6105 ----------------------------------------------------------------------------------- + kubectl_bin create namespace monitoring-2-0-6105 ++ mktemp + local LAST_OUT=/tmp/tmp.5Q3ijwJTqJ ++ mktemp + local LAST_ERR=/tmp/tmp.bhousLLjOe + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace monitoring-2-0-6105 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.5Q3ijwJTqJ namespace/monitoring-2-0-6105 created + cat /tmp/tmp.bhousLLjOe + rm /tmp/tmp.5Q3ijwJTqJ /tmp/tmp.bhousLLjOe + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.NqatOYiX8m +++ mktemp ++ local LAST_ERR=/tmp/tmp.w13vDp0bTY ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.NqatOYiX8m ++ cat /tmp/tmp.w13vDp0bTY ++ rm /tmp/tmp.NqatOYiX8m /tmp/tmp.w13vDp0bTY ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1740-0a840b68-3-cluster8 --namespace=monitoring-2-0-6105 ++ mktemp + local LAST_OUT=/tmp/tmp.IFmswPH4vx ++ mktemp + local LAST_ERR=/tmp/tmp.kUyrGsh3Qh + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-1740-0a840b68-3-cluster8 --namespace=monitoring-2-0-6105 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.IFmswPH4vx Context 
"gke_cloud-dev-112233_us-central1-a_jen-pxc-1740-0a840b68-3-cluster8" modified. + cat /tmp/tmp.kUyrGsh3Qh + rm /tmp/tmp.IFmswPH4vx /tmp/tmp.kUyrGsh3Qh + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.v8z1R5BKqh ++ mktemp + local LAST_ERR=/tmp/tmp.2wFdeDA6T7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.v8z1R5BKqh secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.2wFdeDA6T7 + rm /tmp/tmp.v8z1R5BKqh /tmp/tmp.2wFdeDA6T7 + return 0 + deploy_helm monitoring-2-0-6105 + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" already exists with the same configuration, skipping + helm repo add minio https://charts.min.io/ "minio" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "chaos-mesh" chart repository ...Successfully got an update from the "percona" chart repository ...Successfully got an update from the "hashicorp" chart repository Update Complete. ⎈Happy Helming!⎈ + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + deploy_pmm_server + '[' '!' -z '' ']' + helm repo add percona https://percona.github.io/percona-helm-charts/ "percona" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "chaos-mesh" chart repository ...Successfully got an update from the "hashicorp" chart repository ...Successfully got an update from the "percona" chart repository Update Complete. 
⎈Happy Helming!⎈ + helm uninstall monitoring Error: uninstall: Release not loaded: monitoring: release: not found + : + retry 10 60 helm install monitoring --set imageRepo=perconalab/pmm-server --set imageTag=dev-latest https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz + local max=10 + local delay=60 + shift 2 + local n=1 + helm install monitoring --set imageRepo=perconalab/pmm-server --set imageTag=dev-latest https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz NAME: monitoring LAST DEPLOYED: Fri Jun 28 07:47:20 2024 NAMESPACE: monitoring-2-0-6105 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster: endpoint: https://monitoring-service.monitoring-2-0-6105.svc.cluster.local:443 login: admin password: admin + kubectl_bin wait --for=condition=Ready pod/monitoring-0 --timeout=120s ++ mktemp + local LAST_OUT=/tmp/tmp.mIpDfoomp3 ++ mktemp + local LAST_ERR=/tmp/tmp.yIZU1mRewr + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod/monitoring-0 --timeout=120s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.mIpDfoomp3 pod/monitoring-0 condition met + cat /tmp/tmp.yIZU1mRewr + rm /tmp/tmp.mIpDfoomp3 /tmp/tmp.yIZU1mRewr + return 0 + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.OBYNheRgtu ++ mktemp + local LAST_ERR=/tmp/tmp.tBRCBkn7hH + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.OBYNheRgtu + cat /tmp/tmp.tBRCBkn7hH + rm /tmp/tmp.OBYNheRgtu /tmp/tmp.tBRCBkn7hH + return 0 ++ kubectl_bin exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mF7llDcGv4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.kEvMGhHShm ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.mF7llDcGv4 ++ cat /tmp/tmp.kEvMGhHShm ++ rm /tmp/tmp.mF7llDcGv4 /tmp/tmp.kEvMGhHShm ++ return 0 + ADMIN_PASSWORD=admin + sleep 5 + kubectl_bin exec monitoring-0 -- bash -c 'grafana-cli --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini admin reset-admin-password admin' ++ mktemp + local LAST_OUT=/tmp/tmp.Wzj2ExuyvP ++ mktemp + local LAST_ERR=/tmp/tmp.uS0N7NQtWH + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'grafana-cli --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini admin reset-admin-password admin' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Wzj2ExuyvP logger=settings t=2024-06-28T07:47:48.848852956Z level=info msg="Starting Grafana" version= commit= branch= compiled=1970-01-01T00:00:00Z logger=settings t=2024-06-28T07:47:48.849105314Z level=info msg="Config loaded from" file=/usr/share/grafana/conf/defaults.ini logger=settings t=2024-06-28T07:47:48.849130886Z level=info msg="Config loaded from" file=/etc/grafana/grafana.ini logger=settings t=2024-06-28T07:47:48.849140845Z level=info msg="Path Home" path=/usr/share/grafana logger=settings t=2024-06-28T07:47:48.849148436Z level=info msg="Path Data" path=/srv/grafana logger=settings 
t=2024-06-28T07:47:48.849156751Z level=info msg="Path Logs" path=/srv/logs logger=settings t=2024-06-28T07:47:48.849163041Z level=info msg="Path Plugins" path=/srv/grafana/plugins logger=settings t=2024-06-28T07:47:48.849171477Z level=info msg="Path Provisioning" path=/usr/share/grafana/conf/provisioning logger=settings t=2024-06-28T07:47:48.849181033Z level=info msg="App mode production" logger=sqlstore t=2024-06-28T07:47:48.849297262Z level=info msg="Connecting to DB" dbtype=postgres logger=migrator t=2024-06-28T07:47:48.875888342Z level=info msg="Starting DB migrations" logger=migrator t=2024-06-28T07:47:48.883058756Z level=info msg="migrations completed" performed=0 skipped=452 duration=809.136µs logger=secrets t=2024-06-28T07:47:48.885680171Z level=info msg="Envelope encryption state" enabled=true currentprovider=secretKey.v1 logger=plugin.finder t=2024-06-28T07:47:48.93380718Z level=warn msg="Skipping finding plugins as directory does not exist" path=/usr/share/grafana/plugins-bundled logger=plugin.signature.validator t=2024-06-28T07:47:49.209162026Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=grafana-polystat-panel pluginDir=/srv/grafana/plugins/grafana-polystat-panel logger=plugin.signature.validator t=2024-06-28T07:47:49.209230967Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-qan-app-panel pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-qan logger=plugin.signature.validator t=2024-06-28T07:47:49.209266542Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-pt-summary-panel pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-pt-summary/panel logger=plugin.signature.validator t=2024-06-28T07:47:49.209283155Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-app pluginDir=/srv/grafana/plugins/pmm-app/dist logger=plugin.pmm-app t=2024-06-28T07:47:49.219118927Z level=warn msg="Included dashboard is missing a UID field" logger=plugin.signature.validator t=2024-06-28T07:47:49.219458455Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-pt-summary-datasource pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-pt-summary/datasource logger=plugin.loader t=2024-06-28T07:47:49.21963768Z level=info msg="Plugin registered" pluginID=grafana-polystat-panel logger=plugin.loader t=2024-06-28T07:47:49.219663303Z level=info msg="Plugin registered" pluginID=grafana-worldmap-panel logger=plugin.loader t=2024-06-28T07:47:49.219673135Z level=info msg="Plugin registered" pluginID=grafana-clickhouse-datasource logger=plugin.grafana-clickhouse-datasource t=2024-06-28T07:47:49.236928134Z level=warn msg="Plugin process is running with elevated privileges. 
This is not recommended" logger=plugin.loader t=2024-06-28T07:47:49.236976919Z level=info msg="Plugin registered" pluginID=jdbranham-diagram-panel logger=plugin.loader t=2024-06-28T07:47:49.236999419Z level=info msg="Plugin registered" pluginID=petrslavotinek-carpetplot-panel logger=plugin.loader t=2024-06-28T07:47:49.237008607Z level=info msg="Plugin registered" pluginID=pmm-qan-app-panel logger=plugin.loader t=2024-06-28T07:47:49.237019059Z level=info msg="Plugin registered" pluginID=camptocamp-prometheus-alertmanager-datasource logger=plugin.loader t=2024-06-28T07:47:49.237029269Z level=info msg="Plugin registered" pluginID=grafana-piechart-panel logger=plugin.loader t=2024-06-28T07:47:49.237038799Z level=info msg="Plugin registered" pluginID=natel-discrete-panel logger=plugin.loader t=2024-06-28T07:47:49.237050026Z level=info msg="Plugin registered" pluginID=pmm-pt-summary-panel logger=plugin.loader t=2024-06-28T07:47:49.237059614Z level=info msg="Plugin registered" pluginID=pmm-app logger=plugin.loader t=2024-06-28T07:47:49.237069071Z level=info msg="Plugin registered" pluginID=pmm-pt-summary-datasource Admin password changed successfully ✔ + cat /tmp/tmp.uS0N7NQtWH + rm /tmp/tmp.Wzj2ExuyvP /tmp/tmp.uS0N7NQtWH + return 0 + desc 'create PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PXC cluster ----------------------------------------------------------------------------------- + spinup_pxc monitoring /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/conf/monitoring.yml 3 120 /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/conf/secrets.yaml + local cluster=monitoring + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/conf/monitoring.yml + local size=3 + local sleep=120 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/conf/secrets.yaml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/conf/secrets.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.NKe1nmPm0w ++ mktemp + local LAST_ERR=/tmp/tmp.Mz4K9he0x7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/conf/secrets.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.NKe1nmPm0w secret/my-cluster-secrets created + cat /tmp/tmp.Mz4K9he0x7 + rm /tmp/tmp.NKe1nmPm0w /tmp/tmp.Mz4K9he0x7 + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/conf/client.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1740-0a840b68#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: 
perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' ++ mktemp + /usr/bin/sed -e 's#apply:.*#apply: Never#' + local LAST_OUT=/tmp/tmp.uVJPGf30eM + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.monitoring-2-0-6105~ + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' ++ mktemp + local LAST_ERR=/tmp/tmp.tZG4ZRcmFz + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.uVJPGf30eM deployment.apps/pxc-client created + cat /tmp/tmp.tZG4ZRcmFz + rm /tmp/tmp.uVJPGf30eM /tmp/tmp.tZG4ZRcmFz + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/conf/monitoring.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/conf/monitoring.yml + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/conf/monitoring.yml ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + local LAST_OUT=/tmp/tmp.LtAhEpVKhT + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + local LAST_ERR=/tmp/tmp.9JOArrsYCr + local exit_status=0 + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.monitoring-2-0-6105~ + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' ++ seq 0 2 + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-1740-0a840b68#' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.LtAhEpVKhT perconaxtradbcluster.pxc.percona.com/monitoring created + cat /tmp/tmp.9JOArrsYCr + rm /tmp/tmp.LtAhEpVKhT /tmp/tmp.9JOArrsYCr + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy monitoring ++ local target_cluster=monitoring +++ kubectl_bin get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.z9Zrc7Qh2n ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ttJdLbbImC +++ local 
exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.z9Zrc7Qh2n +++ cat /tmp/tmp.ttJdLbbImC +++ rm /tmp/tmp.z9Zrc7Qh2n /tmp/tmp.ttJdLbbImC +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo monitoring-haproxy ++ return + local proxy=monitoring-haproxy + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-2-0-6105 ++ mktemp + local LAST_OUT=/tmp/tmp.2ivVVl0cXA ++ mktemp + local LAST_ERR=/tmp/tmp.deGwiIVV44 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-2-0-6105 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-2-0-6105 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-2-0-6105 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.2ivVVl0cXA + cat /tmp/tmp.deGwiIVV44 error: no matching resources found + rm /tmp/tmp.2ivVVl0cXA /tmp/tmp.deGwiIVV44 + return 1 + true + wait_for_running monitoring-haproxy 1 + local name=monitoring-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-haproxy-0 480 + local pod=monitoring-haproxy-0 + local max_retry=480 + local ns= ++ echo monitoring-haproxy-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace Error from server (NotFound): pods "monitoring-haproxy-0" not found monitoring-haproxy-0......................................................Defaulted container "pmm-client" out of: pmm-client, haproxy, pxc-monit, pxc-init (init) .Ok + wait_for_running monitoring-pxc 3 + local name=monitoring-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-0 480 + local pod=monitoring-pxc-0 + local max_retry=480 + local ns= ++ echo monitoring-pxc-0 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/monitoring-pxc-0 condition met monitoring-pxc-0.Ok + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-1 480 + local pod=monitoring-pxc-1 + local max_retry=480 + local ns= ++ echo monitoring-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace 
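# NOTE: wait_pod (traced above for the haproxy and pxc pods) derives the
# container to check from the pod name itself: "...-pxc-N" / "...-proxysql-N"
# map to that container, anything else (operator, haproxy, client pods) yields
# an empty value. Its polling loop runs under `set +o xtrace`, so only the dots
# and ".Ok" are visible here; the loop below is a hedged reconstruction:
wait_pod() {
    local pod=$1 max_retry=${2:-480} ns=$3
    local container
    container=$(echo "$pod" \
        | /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' \
        | egrep '^(pxc|proxysql)$' || true)       # empty for non-pxc/proxysql pods
    local i=0
    until kubectl ${ns:+-n "$ns"} get pod "$pod" \
        -o 'jsonpath={.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null \
        | grep -q True; do
        echo -n .                                 # one dot per failed check, as in the output above
        i=$((i + 1))
        [ "$i" -ge "$max_retry" ] && return 1
        sleep 1                                   # assumed pause; the real interval is hidden by set +o xtrace
    done
    echo "$pod.Ok"                                # $container would drive a per-container check in the real helper
}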
pod/monitoring-pxc-1 condition met monitoring-pxc-1.Ok + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-2 480 + local pod=monitoring-pxc-2 + local max_retry=480 + local ns= ++ echo monitoring-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/monitoring-pxc-2 condition met monitoring-pxc-2.Ok + sleep 120 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h monitoring-haproxy -uroot -proot_password -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h monitoring-haproxy -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xHfNWItqhS +++ mktemp ++ local LAST_ERR=/tmp/tmp.IxgCDHuLIJ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.xHfNWItqhS ++ cat /tmp/tmp.IxgCDHuLIJ ++ rm /tmp/tmp.xHfNWItqhS /tmp/tmp.IxgCDHuLIJ ++ return 0 + client_pod=pxc-client-6644d8898f-hrtz9 + wait_pod pxc-client-6644d8898f-hrtz9 + local pod=pxc-client-6644d8898f-hrtz9 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' ++ echo pxc-client-6644d8898f-hrtz9 + local container= + set +o xtrace pod/pxc-client-6644d8898f-hrtz9 condition met pxc-client-6644d8898f-hrtz9.Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h monitoring-haproxy -uroot -proot_password -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h monitoring-haproxy -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.DH8hONj53P +++ mktemp ++ local LAST_ERR=/tmp/tmp.f5vkh69n04 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.DH8hONj53P ++ cat /tmp/tmp.f5vkh69n04 ++ rm /tmp/tmp.DH8hONj53P /tmp/tmp.f5vkh69n04 ++ return 0 + client_pod=pxc-client-6644d8898f-hrtz9 + wait_pod pxc-client-6644d8898f-hrtz9 + local pod=pxc-client-6644d8898f-hrtz9 + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-hrtz9 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-hrtz9 condition met pxc-client-6644d8898f-hrtz9.Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-0.monitoring-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-0.monitoring-pxc -uroot -proot_password -P3306' + local postfix= + local 
expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-0.monitoring-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-0.monitoring-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.M1vRg18QI7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.OgrgysZRH6 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.M1vRg18QI7 ++ cat /tmp/tmp.OgrgysZRH6 ++ rm /tmp/tmp.M1vRg18QI7 /tmp/tmp.OgrgysZRH6 ++ return 0 + client_pod=pxc-client-6644d8898f-hrtz9 + wait_pod pxc-client-6644d8898f-hrtz9 + local pod=pxc-client-6644d8898f-hrtz9 + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-hrtz9 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-hrtz9 condition met pxc-client-6644d8898f-hrtz9.Ok + set +o xtrace + '[' '!' -s /tmp/tmp.sCyU0e1yt2/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/select-1.sql /tmp/tmp.sCyU0e1yt2/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-1.monitoring-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-1.monitoring-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-1.monitoring-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-1.monitoring-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Rm1fRiu6uo +++ mktemp ++ local LAST_ERR=/tmp/tmp.mergq5VQxl ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Rm1fRiu6uo ++ cat /tmp/tmp.mergq5VQxl ++ rm /tmp/tmp.Rm1fRiu6uo /tmp/tmp.mergq5VQxl ++ return 0 + client_pod=pxc-client-6644d8898f-hrtz9 + wait_pod pxc-client-6644d8898f-hrtz9 + local pod=pxc-client-6644d8898f-hrtz9 + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-hrtz9 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-hrtz9 condition met pxc-client-6644d8898f-hrtz9.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.sCyU0e1yt2/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/select-1.sql /tmp/tmp.sCyU0e1yt2/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-2.monitoring-pxc -uroot -proot_password -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-2.monitoring-pxc -uroot -proot_password -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/select-1-80.sql ']' + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-2.monitoring-pxc -uroot -proot_password -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-2.monitoring-pxc -uroot -proot_password -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ndn6XtOytS +++ mktemp ++ local LAST_ERR=/tmp/tmp.lqTPIVrxBe ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Ndn6XtOytS ++ cat /tmp/tmp.lqTPIVrxBe ++ rm /tmp/tmp.Ndn6XtOytS /tmp/tmp.lqTPIVrxBe ++ return 0 + client_pod=pxc-client-6644d8898f-hrtz9 + wait_pod pxc-client-6644d8898f-hrtz9 + local pod=pxc-client-6644d8898f-hrtz9 + local max_retry=480 + local ns= ++ echo pxc-client-6644d8898f-hrtz9 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ egrep '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-6644d8898f-hrtz9 condition met pxc-client-6644d8898f-hrtz9.Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.sCyU0e1yt2/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/select-1.sql /tmp/tmp.sCyU0e1yt2/select-1.sql ++ is_keyring_plugin_in_use monitoring ++ local cluster=monitoring ++ kubectl_bin exec -it monitoring-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2E4SlSSXDk +++ mktemp ++ local LAST_ERR=/tmp/tmp.ntcFsVqJb2 ++ local exit_status=0 ++ egrep -o 'early-plugin-load=keyring_\w+.so' +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it monitoring-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.2E4SlSSXDk ++ cat /tmp/tmp.ntcFsVqJb2 Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.2E4SlSSXDk /tmp/tmp.ntcFsVqJb2 ++ return 0 + '[' '' ']' + desc 'add PMM API key to secret' + set +o xtrace ----------------------------------------------------------------------------------- add PMM API key to secret ----------------------------------------------------------------------------------- +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ jq '.status.loadBalancer.ingress[].hostname' ++ jq .key ++++ local LAST_OUT=/tmp/tmp.YQdoJQ2DL8 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Ha4jsxg2Ly ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.YQdoJQ2DL8 ++++ cat /tmp/tmp.Ha4jsxg2Ly ++++ rm /tmp/tmp.YQdoJQ2DL8 /tmp/tmp.Ha4jsxg2Ly ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.99um9u6fJi +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.pzyYiUO4Ob ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.99um9u6fJi ++++ cat /tmp/tmp.pzyYiUO4Ob ++++ rm /tmp/tmp.99um9u6fJi /tmp/tmp.pzyYiUO4Ob ++++ return 0 +++ local ip=34.171.225.52 +++ '[' -n 34.171.225.52 -a 34.171.225.52 '!=' null ']' +++ echo 34.171.225.52 +++ return ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@34.171.225.52/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 155 100 119 100 36 260 78 --:--:-- --:--:-- --:--:-- 339 + API_KEY='"eyJrIjoiR0pXV3BjUHlhdnI2ODBlb0ZPeXFRa1JwamNnOXF6aGgiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="' + kubectl_bin patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoiR0pXV3BjUHlhdnI2ODBlb0ZPeXFRa1JwamNnOXF6aGgiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' ++ mktemp + local LAST_OUT=/tmp/tmp.8QnxIf7Ulc ++ mktemp + local LAST_ERR=/tmp/tmp.EuUcCVeuKE + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoiR0pXV3BjUHlhdnI2ODBlb0ZPeXFRa1JwamNnOXF6aGgiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break 
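# NOTE: the "add PMM API key to secret" step traced above reduces to three
# calls: resolve the LoadBalancer endpoint of monitoring-service, ask Grafana
# for an Admin API key, and merge it into the cluster secret as pmmserverkey.
# Condensed sketch of that sequence (same endpoint and admin:admin credentials
# as this run; jq -r replaces the quote-stripping sed of the original helpers):
endpoint=$(kubectl get service/monitoring-service -o json \
    | jq -r '.status.loadBalancer.ingress[0] | .hostname // .ip')
api_key=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
    -d '{"name":"operator", "role": "Admin"}' \
    "https://admin:admin@${endpoint}/graph/api/auth/keys" | jq -r .key)
kubectl patch secret my-cluster-secrets --type merge \
    --patch "{\"stringData\": {\"pmmserverkey\": \"${api_key}\"}}"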
+ cat /tmp/tmp.8QnxIf7Ulc secret/my-cluster-secrets patched + cat /tmp/tmp.EuUcCVeuKE + rm /tmp/tmp.8QnxIf7Ulc /tmp/tmp.EuUcCVeuKE + return 0 + wait_for_generation sts/monitoring-pxc 2 + local resource=sts/monitoring-pxc + local target_generation=2 + echo 'Waiting for sts/monitoring-pxc to reach generation 2...' Waiting for sts/monitoring-pxc to reach generation 2... + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=1 + '[' 1 -eq 2 ']' + echo 'Resource sts/monitoring-pxc is at generation 1. Waiting...' Resource sts/monitoring-pxc is at generation 1. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 2 ']' + echo 'Resource sts/monitoring-pxc has reached generation 2.' Resource sts/monitoring-pxc has reached generation 2. + break + wait_for_generation sts/monitoring-haproxy 2 + local resource=sts/monitoring-haproxy + local target_generation=2 + echo 'Waiting for sts/monitoring-haproxy to reach generation 2...' Waiting for sts/monitoring-haproxy to reach generation 2... + true ++ kubectl get sts/monitoring-haproxy -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 2 ']' + echo 'Resource sts/monitoring-haproxy has reached generation 2.' Resource sts/monitoring-haproxy has reached generation 2. + break + sleep 10 + kubectl wait pod -l app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --for=condition=ready --timeout=600s pod/monitoring-haproxy-0 condition met pod/monitoring-haproxy-1 condition met pod/monitoring-pxc-0 condition met pod/monitoring-pxc-1 condition met pod/monitoring-pxc-2 condition met + wait_cluster_consistency monitoring 3 2 + local cluster_name=monitoring + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=36 + sleep 7 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OamQ98bjTs +++ mktemp ++ local LAST_ERR=/tmp/tmp.5nATJNpZbM ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.OamQ98bjTs ++ cat /tmp/tmp.5nATJNpZbM ++ rm /tmp/tmp.OamQ98bjTs /tmp/tmp.5nATJNpZbM ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 0 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.yC1gOFzDux +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ny4OAnKWHI ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.yC1gOFzDux ++ cat /tmp/tmp.Ny4OAnKWHI ++ rm /tmp/tmp.yC1gOFzDux /tmp/tmp.Ny4OAnKWHI ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 1 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JdejRfdXf1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.VjtqdVblM7 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc 
monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.JdejRfdXf1 ++ cat /tmp/tmp.VjtqdVblM7 ++ rm /tmp/tmp.JdejRfdXf1 /tmp/tmp.VjtqdVblM7 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 2 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ytM2CqXLl2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.10pxvBoFRl ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ytM2CqXLl2 ++ cat /tmp/tmp.10pxvBoFRl ++ rm /tmp/tmp.ytM2CqXLl2 /tmp/tmp.10pxvBoFRl ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 3 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Wlvi1smP0z +++ mktemp ++ local LAST_ERR=/tmp/tmp.Q8wXoedkTS ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Wlvi1smP0z ++ cat /tmp/tmp.Q8wXoedkTS ++ rm /tmp/tmp.Wlvi1smP0z /tmp/tmp.Q8wXoedkTS ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo 'waiting for cluster readyness' waiting for cluster readyness + sleep 20 + [[ 4 -ge 36 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.88ZN79l9Uz +++ mktemp ++ local LAST_ERR=/tmp/tmp.nIzO6dItqP ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.88ZN79l9Uz ++ cat /tmp/tmp.nIzO6dItqP ++ rm /tmp/tmp.88ZN79l9Uz /tmp/tmp.nIzO6dItqP ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LzmyxVsUar +++ mktemp ++ local LAST_ERR=/tmp/tmp.BI0WbLsBMq ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.LzmyxVsUar ++ cat /tmp/tmp.BI0WbLsBMq ++ rm /tmp/tmp.LzmyxVsUar /tmp/tmp.BI0WbLsBMq ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine monitoring +++ local cluster_name=monitoring ++++ get_proxy monitoring ++++ local target_cluster=monitoring +++++ kubectl_bin get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.4rKclNRwEg ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.uwVWFLDfyI +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.4rKclNRwEg +++++ cat /tmp/tmp.uwVWFLDfyI +++++ rm /tmp/tmp.4rKclNRwEg /tmp/tmp.uwVWFLDfyI +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo monitoring-haproxy ++++ return +++ local cluster_proxy=monitoring-haproxy +++ echo haproxy ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bmQQyGeTSs +++ mktemp ++ local LAST_ERR=/tmp/tmp.RNfRqxT8S0 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set 
+e ++ kubectl get pxc monitoring -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.bmQQyGeTSs ++ cat /tmp/tmp.RNfRqxT8S0 ++ rm /tmp/tmp.bmQQyGeTSs /tmp/tmp.RNfRqxT8S0 ++ return 0 + [[ 2 == \2 ]] + compare_kubectl statefulset/monitoring-pxc -no-prefix + local resource=statefulset/monitoring-pxc + local postfix=-no-prefix + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix.yml + local new_result=/tmp/tmp.sCyU0e1yt2/statefulset_monitoring-pxc.yml + desc 'compare statefulset/monitoring-pxc--no-prefix' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-pxc--no-prefix ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-80.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.27' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.24' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-eks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-pxc + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. 
| select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-6105", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.aHucBdah8q ++ mktemp + local LAST_ERR=/tmp/tmp.kCBjiQWkcY + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.aHucBdah8q + cat /tmp/tmp.kCBjiQWkcY + rm /tmp/tmp.aHucBdah8q /tmp/tmp.kCBjiQWkcY + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix.yml /tmp/tmp.sCyU0e1yt2/statefulset_monitoring-pxc.yml + compare_kubectl statefulset/monitoring-haproxy -no-prefix + local resource=statefulset/monitoring-haproxy + local postfix=-no-prefix + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix.yml + local new_result=/tmp/tmp.sCyU0e1yt2/statefulset_monitoring-haproxy.yml + desc 'compare statefulset/monitoring-haproxy--no-prefix' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-haproxy--no-prefix ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-80.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.27' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.24' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true 
if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-eks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-haproxy ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. 
| select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-6105", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.ZMMEwcUAul ++ mktemp + local LAST_ERR=/tmp/tmp.z2bVIuxh3J + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ZMMEwcUAul + cat /tmp/tmp.z2bVIuxh3J + rm /tmp/tmp.ZMMEwcUAul /tmp/tmp.z2bVIuxh3J + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix.yml /tmp/tmp.sCyU0e1yt2/statefulset_monitoring-haproxy.yml + desc 'apply my-env-var-secrets to add PMM_PREFIX' + set +o xtrace ----------------------------------------------------------------------------------- apply my-env-var-secrets to add PMM_PREFIX ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/conf/envsecrets.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.IcJLg1YSaY ++ mktemp + local LAST_ERR=/tmp/tmp.40Equ4Qusj + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/conf/envsecrets.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.IcJLg1YSaY secret/my-env-var-secrets created + cat /tmp/tmp.40Equ4Qusj + rm /tmp/tmp.IcJLg1YSaY /tmp/tmp.40Equ4Qusj + return 0 + desc 'add new PMM API key to secret' + set +o xtrace ----------------------------------------------------------------------------------- add new PMM API key to secret ----------------------------------------------------------------------------------- ++ jq .key +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json +++++ mktemp ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ sed -e 's/^"//; s/"$//;' ++++ local LAST_OUT=/tmp/tmp.iT9KnNooFh +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.jvVpbobvKH ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.iT9KnNooFh ++++ cat /tmp/tmp.jvVpbobvKH ++++ rm /tmp/tmp.iT9KnNooFh /tmp/tmp.jvVpbobvKH ++++ return 0 +++ 
local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.7Z209jkFUh +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.bZjPFv5w7S ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.7Z209jkFUh ++++ cat /tmp/tmp.bZjPFv5w7S ++++ rm /tmp/tmp.7Z209jkFUh /tmp/tmp.bZjPFv5w7S ++++ return 0 +++ local ip=34.171.225.52 +++ '[' -n 34.171.225.52 -a 34.171.225.52 '!=' null ']' +++ echo 34.171.225.52 +++ return ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator-new", "role": "Admin"}' https://admin:admin@34.171.225.52/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 167 100 127 100 40 272 85 --:--:-- --:--:-- --:--:-- 358 + API_KEY_NEW='"eyJrIjoieVJ1SUJyc0tuckxYVjA4MGM0Q1piYW1ON2x5enFYM04iLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9"' + kubectl_bin patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoieVJ1SUJyc0tuckxYVjA4MGM0Q1piYW1ON2x5enFYM04iLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9"}}' ++ mktemp + local LAST_OUT=/tmp/tmp.4uEaGZ2x9e ++ mktemp + local LAST_ERR=/tmp/tmp.1vYYMJd8M3 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoieVJ1SUJyc0tuckxYVjA4MGM0Q1piYW1ON2x5enFYM04iLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9"}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4uEaGZ2x9e secret/my-cluster-secrets patched + cat /tmp/tmp.1vYYMJd8M3 + rm /tmp/tmp.4uEaGZ2x9e /tmp/tmp.1vYYMJd8M3 + return 0 + desc 'delete old PMM key' + set +o xtrace ----------------------------------------------------------------------------------- delete old PMM key ----------------------------------------------------------------------------------- ++ jq '.[] | select( .name == "operator").id' +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.HC4SQsog3s +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.So3i7v0U5M ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.HC4SQsog3s ++++ cat /tmp/tmp.So3i7v0U5M ++++ rm /tmp/tmp.HC4SQsog3s /tmp/tmp.So3i7v0U5M ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.G1nUzggEtp +++++ mktemp ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' ++++ local LAST_ERR=/tmp/tmp.ielmyd5yiU ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.G1nUzggEtp ++++ cat /tmp/tmp.ielmyd5yiU ++++ rm /tmp/tmp.G1nUzggEtp /tmp/tmp.ielmyd5yiU ++++ return 0 +++ local ip=34.171.225.52 +++ '[' -n 34.171.225.52 -a 34.171.225.52 '!=' null ']' +++ 
echo 34.171.225.52 +++ return ++ curl --insecure -X GET https://admin:admin@34.171.225.52/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 595 100 595 0 0 1334 0 --:--:-- --:--:-- --:--:-- 1334 + ID_API_KEY_OLD=6 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.a2Ut08NxzJ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.N4sHoq1ZwF +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.a2Ut08NxzJ +++ cat /tmp/tmp.N4sHoq1ZwF +++ rm /tmp/tmp.a2Ut08NxzJ /tmp/tmp.N4sHoq1ZwF +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ jq '.status.loadBalancer.ingress[].ip' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.VV7REleRT2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ZimWOiqErs +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.VV7REleRT2 +++ cat /tmp/tmp.ZimWOiqErs +++ rm /tmp/tmp.VV7REleRT2 /tmp/tmp.ZimWOiqErs +++ return 0 ++ local ip=34.171.225.52 ++ '[' -n 34.171.225.52 -a 34.171.225.52 '!=' null ']' ++ echo 34.171.225.52 ++ return + curl --insecure -X DELETE https://admin:admin@34.171.225.52/graph/api/auth/keys/6 % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 29 100 29 0 0 65 0 --:--:-- --:--:-- --:--:-- 65 {"message":"API key deleted"}+ wait_for_generation sts/monitoring-pxc 3 + local resource=sts/monitoring-pxc + local target_generation=3 + echo 'Waiting for sts/monitoring-pxc to reach generation 3...' Waiting for sts/monitoring-pxc to reach generation 3... + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 3 ']' + echo 'Resource sts/monitoring-pxc has reached generation 3.' Resource sts/monitoring-pxc has reached generation 3. + break + wait_for_generation sts/monitoring-haproxy 3 + local resource=sts/monitoring-haproxy + local target_generation=3 + echo 'Waiting for sts/monitoring-haproxy to reach generation 3...' Waiting for sts/monitoring-haproxy to reach generation 3... + true ++ kubectl get sts/monitoring-haproxy -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 3 ']' + echo 'Resource sts/monitoring-haproxy has reached generation 3.' Resource sts/monitoring-haproxy has reached generation 3. 
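For reference, the PMM API key rotation exercised in the trace above can be reduced to a short sketch: create a new Grafana API key, patch it into the my-cluster-secrets secret as pmmserverkey, delete the old key named "operator", then wait for both statefulsets to roll to the next generation. This is a simplified, hedged reconstruction of what the test helpers do, not the suite's exact code: it omits the kubectl_bin retry wrappers and temp-file capture, assumes monitoring-service exposes a LoadBalancer IP (as it does in this run), and hard-codes the target generation 3 seen above.

#!/bin/bash
set -euo pipefail

# Endpoint of the PMM server service (LoadBalancer IP assumed, as in this run).
ENDPOINT=$(kubectl get service/monitoring-service \
    -o jsonpath='{.status.loadBalancer.ingress[0].ip}')

# 1. Create a new PMM API key through the Grafana auth API.
NEW_KEY=$(curl -sk -X POST -H 'Content-Type: application/json' \
    -d '{"name":"operator-new","role":"Admin"}' \
    "https://admin:admin@${ENDPOINT}/graph/api/auth/keys" | jq -r '.key')

# 2. Point the cluster at the new key via the secret the operator watches.
kubectl patch secret my-cluster-secrets --type merge \
    --patch "{\"stringData\": {\"pmmserverkey\": \"${NEW_KEY}\"}}"

# 3. Remove the previous key (named "operator") by its id.
OLD_ID=$(curl -sk "https://admin:admin@${ENDPOINT}/graph/api/auth/keys" \
    | jq '.[] | select(.name == "operator").id')
curl -sk -X DELETE "https://admin:admin@${ENDPOINT}/graph/api/auth/keys/${OLD_ID}"

# 4. Wait until the secret change is rolled out: both statefulsets should
#    reach the expected generation (3 in this run).
for sts in monitoring-pxc monitoring-haproxy; do
    until [ "$(kubectl get "sts/${sts}" -o jsonpath='{.metadata.generation}')" -ge 3 ]; do
        sleep 5
    done
done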
+ break + sleep 10 + kubectl wait pod -l app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --for=condition=ready --timeout=600s pod/monitoring-haproxy-0 condition met pod/monitoring-haproxy-1 condition met pod/monitoring-pxc-0 condition met pod/monitoring-pxc-1 condition met pod/monitoring-pxc-2 condition met + desc 'check if pmm-client container enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-pxc + local resource=statefulset/monitoring-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc.yml + local new_result=/tmp/tmp.sCyU0e1yt2/statefulset_monitoring-pxc.yml + desc 'compare statefulset/monitoring-pxc-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-pxc- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-80.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.26 >= 1.29' + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.27' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.26 >= 1.24' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.26 >= 1.22' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace 
----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-eks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-pxc + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. 
| select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-6105", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.Y2MtzAxDbj ++ mktemp + local LAST_ERR=/tmp/tmp.0Exny4nrwT + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Y2MtzAxDbj + cat /tmp/tmp.0Exny4nrwT + rm /tmp/tmp.Y2MtzAxDbj /tmp/tmp.0Exny4nrwT + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc.yml /tmp/tmp.sCyU0e1yt2/statefulset_monitoring-pxc.yml + compare_kubectl statefulset/monitoring-haproxy + local resource=statefulset/monitoring-haproxy + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy.yml + local new_result=/tmp/tmp.sCyU0e1yt2/statefulset_monitoring-haproxy.yml + desc 'compare statefulset/monitoring-haproxy-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-haproxy- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-80.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.29' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.27' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.24 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.24' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k124.yml ']' + version_gt 1.22 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.22' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k122.yml ']' + version_gt 1.21 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.26 >= 1.21' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k121.yml ']' + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-eks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-haproxy ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. 
| select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-6105", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.aOFRJWUBJm ++ mktemp + local LAST_ERR=/tmp/tmp.fnN5Gpigdb + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.aOFRJWUBJm + cat /tmp/tmp.fnN5Gpigdb + rm /tmp/tmp.aOFRJWUBJm /tmp/tmp.fnN5Gpigdb + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy.yml /tmp/tmp.sCyU0e1yt2/statefulset_monitoring-haproxy.yml + desc 'verify clients agents statuses' + set +o xtrace ----------------------------------------------------------------------------------- verify clients agents statuses ----------------------------------------------------------------------------------- + sleep 300 ++ getSecretData my-cluster-secrets pmmserverkey ++ local secretName=my-cluster-secrets ++ local dataKey=pmmserverkey ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.pmmserverkey}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.UjUDr56BMK +++ mktemp ++ local LAST_ERR=/tmp/tmp.z0Zn3owYgK ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.pmmserverkey}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.UjUDr56BMK ++ cat /tmp/tmp.z0Zn3owYgK ++ rm /tmp/tmp.UjUDr56BMK /tmp/tmp.z0Zn3owYgK ++ return 0 + API_KEY=eyJrIjoieVJ1SUJyc0tuckxYVjA4MGM0Q1piYW1ON2x5enFYM04iLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.41cqJhrYET +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.yGgfJjF21L ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ 
break ++++ cat /tmp/tmp.41cqJhrYET ++++ cat /tmp/tmp.yGgfJjF21L ++++ rm /tmp/tmp.41cqJhrYET /tmp/tmp.yGgfJjF21L ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' ++++ kubectl_bin get service/monitoring-service -o json +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.1N1USMCun2 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.9uzLZBxfQo ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.1N1USMCun2 ++++ cat /tmp/tmp.9uzLZBxfQo ++++ rm /tmp/tmp.1N1USMCun2 /tmp/tmp.9uzLZBxfQo ++++ return 0 +++ local ip=34.171.225.52 +++ '[' -n 34.171.225.52 -a 34.171.225.52 '!=' null ']' +++ echo 34.171.225.52 +++ return ++ get_mgmnt_service_list eyJrIjoieVJ1SUJyc0tuckxYVjA4MGM0Q1piYW1ON2x5enFYM04iLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 34.171.225.52 monitoring-2-0-6105 ++ local api_key=eyJrIjoieVJ1SUJyc0tuckxYVjA4MGM0Q1piYW1ON2x5enFYM04iLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 ++ local endpoint=34.171.225.52 ++ local namespace=monitoring-2-0-6105 ++ jq 'walk(if type == "array" then sort_by(.agent_type) else . end)' ++ jq 'walk(if type=="object" then with_entries(select(.key | test("service_id|node_id|agent_id|created_at|updated_at") | not)) else . end)' ++ curl -s -k -H 'Authorization: Bearer eyJrIjoieVJ1SUJyc0tuckxYVjA4MGM0Q1piYW1ON2x5enFYM04iLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9' -X POST https://34.171.225.52/v1/management/Service/List ++ /usr/bin/sed -i s/monitoring-2-0-6105-//g /tmp/tmp.sCyU0e1yt2/active_pmm_agents.json ++ cat /tmp/tmp.sCyU0e1yt2/active_pmm_agents.json ++ jq '.services | sort_by(.node_name)' ++ echo /tmp/tmp.sCyU0e1yt2/active_pmm_agents_sorted.json + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-1740/e2e-tests/monitoring-2-0/compare/agents-list.json /tmp/tmp.sCyU0e1yt2/active_pmm_agents_sorted.json + desc 'check mysql metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mysql metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds pxc-prefix-monitoring-2-0-6105-monitoring-pxc-0 admin:admin + local metric=node_boot_time_seconds + local instance=pxc-prefix-monitoring-2-0-6105-monitoring-pxc-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1719561812 ++ /usr/bin/date -u +%s + local end=1719561872 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.PGhKUdOA8g ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fNM15ciPqM +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.PGhKUdOA8g +++ cat /tmp/tmp.fNM15ciPqM +++ rm /tmp/tmp.PGhKUdOA8g /tmp/tmp.fNM15ciPqM +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.g3ejho57dq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JHRVnSv8ND +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get 
service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.g3ejho57dq +++ cat /tmp/tmp.JHRVnSv8ND +++ rm /tmp/tmp.g3ejho57dq /tmp/tmp.JHRVnSv8ND +++ return 0 ++ local ip=34.171.225.52 ++ '[' -n 34.171.225.52 -a 34.171.225.52 '!=' null ']' ++ echo 34.171.225.52 ++ return + local endpoint=34.171.225.52 ++ curl -s -k 'https://admin:admin@34.171.225.52/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-6105-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-6105-monitoring-pxc-0%22%7D%29&start=1719561812&end=1719561872&step=60' ++ jq '.data.result[0]' + local 'result={ "metric": {}, "values": [ [ 1719561812, "1719560657" ], [ 1719561872, "1719560657" ] ] }' + '[' '{ "metric": {}, "values": [ [ 1719561812, "1719560657" ], [ 1719561872, "1719560657" ] ] }' = null ']' + grep '^"[0-9]' + jq '.values[][1]' + echo -n '{ "metric": {}, "values": [ [ 1719561812, "1719560657" ], [ 1719561872, "1719560657" ] ] }' "1719560657" "1719560657" + get_metric_values mysql_global_status_uptime pxc-prefix-monitoring-2-0-6105-monitoring-pxc-0 admin:admin + local metric=mysql_global_status_uptime + local instance=pxc-prefix-monitoring-2-0-6105-monitoring-pxc-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1719561815 ++ /usr/bin/date -u +%s + local end=1719561875 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ jq '.status.loadBalancer.ingress[].hostname' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.P4iyLFvrHS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.PXGhZhivi4 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.P4iyLFvrHS +++ cat /tmp/tmp.PXGhZhivi4 +++ rm /tmp/tmp.P4iyLFvrHS /tmp/tmp.PXGhZhivi4 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pkHk5jwK2X ++++ mktemp +++ local LAST_ERR=/tmp/tmp.sNtDuCv7jI +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.pkHk5jwK2X +++ cat /tmp/tmp.sNtDuCv7jI +++ rm /tmp/tmp.pkHk5jwK2X /tmp/tmp.sNtDuCv7jI +++ return 0 ++ local ip=34.171.225.52 ++ '[' -n 34.171.225.52 -a 34.171.225.52 '!=' null ']' ++ echo 34.171.225.52 ++ return + local endpoint=34.171.225.52 ++ curl -s -k 'https://admin:admin@34.171.225.52/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-6105-monitoring-pxc-0%22%7d%20or%20mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-6105-monitoring-pxc-0%22%7D%29&start=1719561815&end=1719561875&step=60' ++ jq '.data.result[0]' + local 'result={ "metric": {}, "values": [ [ 1719561815, "159" ], [ 1719561875, "219" ] ] }' + '[' '{ "metric": {}, "values": [ [ 1719561815, "159" ], [ 1719561875, "219" ] ] }' = null ']' + jq '.values[][1]' + grep '^"[0-9]' + echo -n '{ "metric": {}, "values": [ [ 1719561815, "159" ], [ 1719561875, "219" ] ] }' "159" "219" + desc 'check 
haproxy metrics' + set +o xtrace ----------------------------------------------------------------------------------- check haproxy metrics ----------------------------------------------------------------------------------- + get_metric_values haproxy_backend_status pxc-prefix-monitoring-2-0-6105-monitoring-haproxy-0 admin:admin + local metric=haproxy_backend_status + local instance=pxc-prefix-monitoring-2-0-6105-monitoring-haproxy-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1719561817 ++ /usr/bin/date -u +%s + local end=1719561877 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ sed -e 's/^"//; s/"$//;' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.1QgvVzGchy ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UyfTur9NTo +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.1QgvVzGchy +++ cat /tmp/tmp.UyfTur9NTo +++ rm /tmp/tmp.1QgvVzGchy /tmp/tmp.UyfTur9NTo +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.R9HiNHDEWk ++++ mktemp +++ local LAST_ERR=/tmp/tmp.eOFRH4hNOu +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.R9HiNHDEWk +++ cat /tmp/tmp.eOFRH4hNOu +++ rm /tmp/tmp.R9HiNHDEWk /tmp/tmp.eOFRH4hNOu +++ return 0 ++ local ip=34.171.225.52 ++ '[' -n 34.171.225.52 -a 34.171.225.52 '!=' null ']' ++ echo 34.171.225.52 ++ return + local endpoint=34.171.225.52 ++ jq '.data.result[0]' ++ curl -s -k 'https://admin:admin@34.171.225.52/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28haproxy_backend_status%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-6105-monitoring-haproxy-0%22%7d%20or%20haproxy_backend_status%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-6105-monitoring-haproxy-0%22%7D%29&start=1719561817&end=1719561877&step=60' + local 'result={ "metric": {}, "values": [ [ 1719561817, "0" ], [ 1719561877, "0" ] ] }' + '[' '{ "metric": {}, "values": [ [ 1719561817, "0" ], [ 1719561877, "0" ] ] }' = null ']' + echo -n '{ "metric": {}, "values": [ [ 1719561817, "0" ], [ 1719561877, "0" ] ] }' + jq '.values[][1]' + grep '^"[0-9]' "0" "0" + get_metric_values haproxy_backend_active_servers pxc-prefix-monitoring-2-0-6105-monitoring-haproxy-0 admin:admin + local metric=haproxy_backend_active_servers + local instance=pxc-prefix-monitoring-2-0-6105-monitoring-haproxy-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1719561819 ++ /usr/bin/date -u +%s + local end=1719561879 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ jq '.status.loadBalancer.ingress[].hostname' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ sed -e 's/^"//; s/"$//;' +++ local LAST_OUT=/tmp/tmp.6DpuUbJlLD ++++ mktemp +++ local LAST_ERR=/tmp/tmp.kPyR2Rqg6x +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.6DpuUbJlLD +++ cat /tmp/tmp.kPyR2Rqg6x 
+++ rm /tmp/tmp.6DpuUbJlLD /tmp/tmp.kPyR2Rqg6x +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.dz9jKxX1Qh ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uHM3CNMJLN +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.dz9jKxX1Qh +++ cat /tmp/tmp.uHM3CNMJLN +++ rm /tmp/tmp.dz9jKxX1Qh /tmp/tmp.uHM3CNMJLN +++ return 0 ++ local ip=34.171.225.52 ++ '[' -n 34.171.225.52 -a 34.171.225.52 '!=' null ']' ++ echo 34.171.225.52 ++ return + local endpoint=34.171.225.52 ++ curl -s -k 'https://admin:admin@34.171.225.52/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28haproxy_backend_active_servers%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-6105-monitoring-haproxy-0%22%7d%20or%20haproxy_backend_active_servers%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-6105-monitoring-haproxy-0%22%7D%29&start=1719561819&end=1719561879&step=60' ++ jq '.data.result[0]' + local 'result={ "metric": {}, "values": [ [ 1719561819, "1" ], [ 1719561879, "1" ] ] }' + '[' '{ "metric": {}, "values": [ [ 1719561819, "1" ], [ 1719561879, "1" ] ] }' = null ']' + jq '.values[][1]' + grep '^"[0-9]' + echo -n '{ "metric": {}, "values": [ [ 1719561819, "1" ], [ 1719561879, "1" ] ] }' "1" "1" + desc 'check QAN data' + set +o xtrace ----------------------------------------------------------------------------------- check QAN data ----------------------------------------------------------------------------------- + get_qan20_values monitoring-pxc-0 admin:admin + local instance=monitoring-pxc-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S -d '-30 minute' + local start=2024-06-28T07:34:41 ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S + local end=2024-06-28T08:04:41 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.aR6hRs2khf ++++ mktemp +++ local LAST_ERR=/tmp/tmp.01bZjU7Cmw +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.aR6hRs2khf +++ cat /tmp/tmp.01bZjU7Cmw +++ rm /tmp/tmp.aR6hRs2khf /tmp/tmp.01bZjU7Cmw +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zrOxGGyHQR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.zVMw4gTpTH +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.zrOxGGyHQR +++ cat /tmp/tmp.zVMw4gTpTH +++ rm /tmp/tmp.zrOxGGyHQR /tmp/tmp.zVMw4gTpTH +++ return 0 ++ local ip=34.171.225.52 ++ '[' -n 34.171.225.52 -a 34.171.225.52 '!=' null ']' ++ echo 34.171.225.52 ++ return + local endpoint=34.171.225.52 + cat ++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z + curl -s -k -XPOST -d @payload.json https://admin:admin@34.171.225.52/v0/qan/GetReport + jq 
'.rows[].fingerprint' null + rm -f payload.json + nodeList=($(get_node_id_from_pmm)) ++ get_node_id_from_pmm ++ nodeList=() ++ local -a nodeList +++ kubectl_bin get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns=NAME:.metadata.name ++++ mktemp +++ local LAST_OUT=/tmp/tmp.BACoC2l9zV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.U7qVWATqxd +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns=NAME:.metadata.name +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.BACoC2l9zV +++ cat /tmp/tmp.U7qVWATqxd +++ rm /tmp/tmp.BACoC2l9zV /tmp/tmp.U7qVWATqxd +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-6105 monitoring-pxc-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2VRc1L3ibR ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ke63kpSYe2 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-6105 monitoring-pxc-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.2VRc1L3ibR +++ cat /tmp/tmp.ke63kpSYe2 +++ rm /tmp/tmp.2VRc1L3ibR /tmp/tmp.ke63kpSYe2 +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-6105 monitoring-pxc-1 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ jq -r .pmm_agent_status.node_id +++ local LAST_OUT=/tmp/tmp.48lJrHoOrr ++++ mktemp +++ local LAST_ERR=/tmp/tmp.6ANx4MqJL1 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-6105 monitoring-pxc-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.48lJrHoOrr +++ cat /tmp/tmp.6ANx4MqJL1 +++ rm /tmp/tmp.48lJrHoOrr /tmp/tmp.6ANx4MqJL1 +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-6105 monitoring-pxc-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TVGhXbnIp0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JJlCljNzra +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-6105 monitoring-pxc-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.TVGhXbnIp0 +++ cat /tmp/tmp.JJlCljNzra +++ rm /tmp/tmp.TVGhXbnIp0 /tmp/tmp.JJlCljNzra +++ return 0 ++ echo /node_id/b21f6070-ec8d-4757-87ab-8a2ea1197569 /node_id/c7319dd5-e32a-4611-92f7-c9afe8c22b3c /node_id/6bb9dc3b-52d8-46b9-8abc-08c05d2ce8ab + nodeList_from_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ 
does_node_id_exists /node_id/b21f6070-ec8d-4757-87ab-8a2ea1197569 /node_id/c7319dd5-e32a-4611-92f7-c9afe8c22b3c /node_id/6bb9dc3b-52d8-46b9-8abc-08c05d2ce8ab ++ nodeList=("$@") ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' ++++ get_service_ip monitoring-service +++ grep /node_id/b21f6070-ec8d-4757-87ab-8a2ea1197569 ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.zyklTgZCTR ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.mwVjmvk32l +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.zyklTgZCTR +++++ cat /tmp/tmp.mwVjmvk32l +++++ rm /tmp/tmp.zyklTgZCTR /tmp/tmp.mwVjmvk32l +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.WoGMUxpTZE +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.m02FiosPkm ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.WoGMUxpTZE ++++ cat /tmp/tmp.m02FiosPkm ++++ rm /tmp/tmp.WoGMUxpTZE /tmp/tmp.m02FiosPkm ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.rv6sS5U6Me +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.rdxS4UhJHE ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.rv6sS5U6Me ++++ cat /tmp/tmp.rdxS4UhJHE ++++ rm /tmp/tmp.rv6sS5U6Me /tmp/tmp.rdxS4UhJHE ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-6105 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.171.225.52/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3RdJGYtLJj ++++ mktemp +++ local LAST_ERR=/tmp/tmp.l7v85vJPRQ +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-6105 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.171.225.52/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.3RdJGYtLJj +++ cat /tmp/tmp.l7v85vJPRQ +++ rm /tmp/tmp.3RdJGYtLJj /tmp/tmp.l7v85vJPRQ +++ return 0 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ 
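# does_node_id_exists above checks, for every collected node_id, that a
# CONTAINER_NODE with that id is present in the PMM server inventory. A sketch
# of a single lookup, assuming monitoring-0 is the PMM server pod and reusing
# the namespace and endpoint from the trace:
namespace=monitoring-2-0-6105
endpoint=34.171.225.52
node_id=/node_id/b21f6070-ec8d-4757-87ab-8a2ea1197569
kubectl exec -n "$namespace" monitoring-0 -- \
    pmm-admin --server-url="https://admin:admin@${endpoint}/" --server-insecure-tls \
    inventory list nodes --node-type=CONTAINER_NODE \
    | grep "$node_id" | awk '{print $4}'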
--server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/c7319dd5-e32a-4611-92f7-c9afe8c22b3c +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.k4ytJpzQOd ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.AzjDVSWKoj +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.k4ytJpzQOd +++++ cat /tmp/tmp.AzjDVSWKoj +++++ rm /tmp/tmp.k4ytJpzQOd /tmp/tmp.AzjDVSWKoj +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.KRsr1JuPy5 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.wrPCwDznIB ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.KRsr1JuPy5 ++++ cat /tmp/tmp.wrPCwDznIB ++++ rm /tmp/tmp.KRsr1JuPy5 /tmp/tmp.wrPCwDznIB ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.CPkZf5Q8rI +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.ttoeBSSUBQ ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.CPkZf5Q8rI ++++ cat /tmp/tmp.ttoeBSSUBQ ++++ rm /tmp/tmp.CPkZf5Q8rI /tmp/tmp.ttoeBSSUBQ ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-6105 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.171.225.52/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.j8sdXMulsp ++++ mktemp +++ local LAST_ERR=/tmp/tmp.4K9J46qW1R +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-6105 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.171.225.52/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.j8sdXMulsp +++ cat /tmp/tmp.4K9J46qW1R +++ rm /tmp/tmp.j8sdXMulsp /tmp/tmp.4K9J46qW1R +++ return 0 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/6bb9dc3b-52d8-46b9-8abc-08c05d2ce8ab ++++ get_service_ip monitoring-service ++++ local service=monitoring-service +++ awk '{print $4}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ 
local LAST_OUT=/tmp/tmp.4egWDnQfSx ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.ohHpemfTar +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.4egWDnQfSx +++++ cat /tmp/tmp.ohHpemfTar +++++ rm /tmp/tmp.4egWDnQfSx /tmp/tmp.ohHpemfTar +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.xVZXgQb4Yr +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.RtJutqkDdW ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.xVZXgQb4Yr ++++ cat /tmp/tmp.RtJutqkDdW ++++ rm /tmp/tmp.xVZXgQb4Yr /tmp/tmp.RtJutqkDdW ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.qHvFYQTzkA +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.OWERPg1FIb ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.qHvFYQTzkA ++++ cat /tmp/tmp.OWERPg1FIb ++++ rm /tmp/tmp.qHvFYQTzkA /tmp/tmp.OWERPg1FIb ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-6105 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.171.225.52/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.rYj3B2W188 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.bkRNHiST8n +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-6105 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.171.225.52/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.rYj3B2W188 +++ cat /tmp/tmp.bkRNHiST8n +++ rm /tmp/tmp.rYj3B2W188 /tmp/tmp.bkRNHiST8n +++ return 0 ++ echo /node_id/b21f6070-ec8d-4757-87ab-8a2ea1197569 /node_id/c7319dd5-e32a-4611-92f7-c9afe8c22b3c /node_id/6bb9dc3b-52d8-46b9-8abc-08c05d2ce8ab + for node_id in '"${nodeList_from_pmm[@]}"' + '[' -z /node_id/b21f6070-ec8d-4757-87ab-8a2ea1197569 ']' + for node_id in '"${nodeList_from_pmm[@]}"' + '[' -z /node_id/c7319dd5-e32a-4611-92f7-c9afe8c22b3c ']' + for node_id in '"${nodeList_from_pmm[@]}"' + '[' -z /node_id/6bb9dc3b-52d8-46b9-8abc-08c05d2ce8ab ']' + kubectl_bin patch pxc monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.CGrS1c00D2 ++ mktemp + local LAST_ERR=/tmp/tmp.h8jdZwsttJ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.CGrS1c00D2 perconaxtradbcluster.pxc.percona.com/monitoring patched + cat /tmp/tmp.h8jdZwsttJ + rm /tmp/tmp.CGrS1c00D2 /tmp/tmp.h8jdZwsttJ + return 0 + wait_for_delete pod/monitoring-pxc-0 + local 
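# Pausing the cluster is a single JSON patch on the pxc custom resource; the
# test then waits until the first PXC pod is gone. A sketch of those two steps
# with the object names used above (the polling loop is an illustration of
# what wait_for_delete does, not its exact implementation):
kubectl patch pxc monitoring --type=json \
    -p '[{"op":"add","path":"/spec/pause","value":true}]'
until ! kubectl get pod monitoring-pxc-0 >/dev/null 2>&1; do
    sleep 1
done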
res=pod/monitoring-pxc-0 + echo -n 'pod/monitoring-pxc-0 - ' pod/monitoring-pxc-0 - + set +o xtrace ...................Error from server (NotFound): pods "monitoring-pxc-0" not found + does_node_id_exists_in_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/b21f6070-ec8d-4757-87ab-8a2ea1197569 /node_id/c7319dd5-e32a-4611-92f7-c9afe8c22b3c /node_id/6bb9dc3b-52d8-46b9-8abc-08c05d2ce8ab ++ nodeList=("$@") ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/b21f6070-ec8d-4757-87ab-8a2ea1197569 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.uKFaKJYXbe ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.M0mmGhLnjd +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.uKFaKJYXbe +++++ cat /tmp/tmp.M0mmGhLnjd +++++ rm /tmp/tmp.uKFaKJYXbe /tmp/tmp.M0mmGhLnjd +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.zvO2yvQ9Mj +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.mUpsPskuCp ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.zvO2yvQ9Mj ++++ cat /tmp/tmp.mUpsPskuCp ++++ rm /tmp/tmp.zvO2yvQ9Mj /tmp/tmp.mUpsPskuCp ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.eFf03wwAn4 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.o9BCmjZuYn ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.eFf03wwAn4 ++++ cat /tmp/tmp.o9BCmjZuYn ++++ rm /tmp/tmp.eFf03wwAn4 /tmp/tmp.o9BCmjZuYn ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-6105 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.171.225.52/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.XZpMqJdroe ++++ mktemp +++ local LAST_ERR=/tmp/tmp.hpeyQxjYQm +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-6105 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.171.225.52/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.XZpMqJdroe +++ cat /tmp/tmp.hpeyQxjYQm +++ 
rm /tmp/tmp.XZpMqJdroe /tmp/tmp.hpeyQxjYQm +++ return 0 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' +++ grep /node_id/c7319dd5-e32a-4611-92f7-c9afe8c22b3c ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.k6r75fuDCi ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.aRnHZIcXaK +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.k6r75fuDCi +++++ cat /tmp/tmp.aRnHZIcXaK +++++ rm /tmp/tmp.k6r75fuDCi /tmp/tmp.aRnHZIcXaK +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.pgKwabu8lq +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.0ubM0DoQ7a ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.pgKwabu8lq ++++ cat /tmp/tmp.0ubM0DoQ7a ++++ rm /tmp/tmp.pgKwabu8lq /tmp/tmp.0ubM0DoQ7a ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.XwO9IojgYz +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.fmcZF4CQhT ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.XwO9IojgYz ++++ cat /tmp/tmp.fmcZF4CQhT ++++ rm /tmp/tmp.XwO9IojgYz /tmp/tmp.fmcZF4CQhT ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-6105 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.171.225.52/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.aSc1MtME9U ++++ mktemp +++ local LAST_ERR=/tmp/tmp.XD27XjsRk9 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-6105 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.171.225.52/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.aSc1MtME9U +++ cat /tmp/tmp.XD27XjsRk9 +++ rm /tmp/tmp.aSc1MtME9U /tmp/tmp.XD27XjsRk9 +++ return 0 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/6bb9dc3b-52d8-46b9-8abc-08c05d2ce8ab +++ awk '{print $4}' ++++ get_service_ip 
monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.48fbpzt3ZQ ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.msW2436Gb1 +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.48fbpzt3ZQ +++++ cat /tmp/tmp.msW2436Gb1 +++++ rm /tmp/tmp.48fbpzt3ZQ /tmp/tmp.msW2436Gb1 +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ egrep -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Z9rF6pBzFC +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.6LISCwrmgA ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.Z9rF6pBzFC ++++ cat /tmp/tmp.6LISCwrmgA ++++ rm /tmp/tmp.Z9rF6pBzFC /tmp/tmp.6LISCwrmgA ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Mvz2MsAdQ7 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.hWQAYil085 ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.Mvz2MsAdQ7 ++++ cat /tmp/tmp.hWQAYil085 ++++ rm /tmp/tmp.Mvz2MsAdQ7 /tmp/tmp.hWQAYil085 ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-6105 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.171.225.52/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.oZjAD0Ve3e ++++ mktemp +++ local LAST_ERR=/tmp/tmp.6KqYeOkeC6 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-6105 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.171.225.52/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.oZjAD0Ve3e +++ cat /tmp/tmp.6KqYeOkeC6 +++ rm /tmp/tmp.oZjAD0Ve3e /tmp/tmp.6KqYeOkeC6 +++ return 0 ++ echo + [[ -n '' ]] + desc 'check for passwords leak' + set +o xtrace ----------------------------------------------------------------------------------- check for passwords leak ----------------------------------------------------------------------------------- + check_passwords_leak + local secrets + local passwords + local pods ++ kubectl_bin get secrets -o json ++ jq -r '.items[].data | to_entries | .[] | select(.key | (endswith(".crt") or endswith(".key") or endswith(".pub") or endswith(".pem") or endswith(".p12") or endswith(".sql") or contains("release") or contains("namespace") or contains("AWS_ACCESS_KEY_ID") or contains("AZURE_STORAGE_ACCOUNT_NAME")) | not) | .value' +++ mktemp ++ local LAST_OUT=/tmp/tmp.hlEOkqD2hz +++ mktemp ++ local LAST_ERR=/tmp/tmp.fic1OoBBnK ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ 
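# After the pause, the same inventory lookup is repeated and must come back
# empty: the [[ -n '' ]] test above only triggers if some node_id is still
# registered. A sketch of that assertion, assuming the does_node_id_exists
# helper from the test suite and the nodeList collected earlier:
remaining=$(does_node_id_exists "${nodeList[@]}")
if [[ -n "$remaining" ]]; then
    echo "node ids still registered in PMM after pause: $remaining" >&2
    exit 1
fi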
set +e ++ kubectl get secrets -o json ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.hlEOkqD2hz ++ cat /tmp/tmp.fic1OoBBnK ++ rm /tmp/tmp.hlEOkqD2hz /tmp/tmp.fic1OoBBnK ++ return 0 + secrets='WEo0K0hITFVQMDJJN0daTm14RWhESDJGT0liTW9sZ0pYU1d0cmw2Vw== K1N6aDhFYjBJWjBvMjV0SXZBei9yN1NzZG1EeWN0M09ENUNMMzIzblZvbFpZdlorWGV3ZFkxM2ZYRW9HM2pHc1o3L05uSWovdTl2ZStBU3R5TEtRa2c9PQo= VEtVS0N0dkFZRC9uR1p3dEYxa0hBOFFuTXpoNlFHMFlrUlZJK0ZLSQ== ZjErZUEzNTNvUFcsOTxiST8hNw== b3BlcmF0b3JhZG1pbg== YWRtaW4= ZXlKcklqb2llVkoxU1VKeWMwdHVja3hZVmpBNE1HTTBRMXBpWVcxT04yeDVlbkZZTTA0aUxDSnVJam9pYjNCbGNtRjBiM0l0Ym1WM0lpd2lhV1FpT2pGOQ== YWRtaW5fcGFzc3dvcmQ= cmVwbF9wYXNzd29yZA== cm9vdF9wYXNzd29yZA== YmFja3VwX3Bhc3N3b3Jk c29tZS1zZWNyZXQta2V5 ZjErZUEzNTNvUFcsOTxiST8hNw== b3BlcmF0b3JhZG1pbg== YWRtaW4= ZXlKcklqb2llVkoxU1VKeWMwdHVja3hZVmpBNE1HTTBRMXBpWVcxT04yeDVlbkZZTTA0aUxDSnVJam9pYjNCbGNtRjBiM0l0Ym1WM0lpd2lhV1FpT2pGOQ== YWRtaW5fcGFzc3dvcmQ= cmVwbF9wYXNzd29yZA== cm9vdF9wYXNzd29yZA== YmFja3VwX3Bhc3N3b3Jk cHhjLXByZWZpeC0=' + echo secrets=WEo0K0hITFVQMDJJN0daTm14RWhESDJGT0liTW9sZ0pYU1d0cmw2Vw== K1N6aDhFYjBJWjBvMjV0SXZBei9yN1NzZG1EeWN0M09ENUNMMzIzblZvbFpZdlorWGV3ZFkxM2ZYRW9HM2pHc1o3L05uSWovdTl2ZStBU3R5TEtRa2c9PQo= VEtVS0N0dkFZRC9uR1p3dEYxa0hBOFFuTXpoNlFHMFlrUlZJK0ZLSQ== ZjErZUEzNTNvUFcsOTxiST8hNw== b3BlcmF0b3JhZG1pbg== YWRtaW4= ZXlKcklqb2llVkoxU1VKeWMwdHVja3hZVmpBNE1HTTBRMXBpWVcxT04yeDVlbkZZTTA0aUxDSnVJam9pYjNCbGNtRjBiM0l0Ym1WM0lpd2lhV1FpT2pGOQ== YWRtaW5fcGFzc3dvcmQ= cmVwbF9wYXNzd29yZA== cm9vdF9wYXNzd29yZA== YmFja3VwX3Bhc3N3b3Jk c29tZS1zZWNyZXQta2V5 ZjErZUEzNTNvUFcsOTxiST8hNw== b3BlcmF0b3JhZG1pbg== YWRtaW4= ZXlKcklqb2llVkoxU1VKeWMwdHVja3hZVmpBNE1HTTBRMXBpWVcxT04yeDVlbkZZTTA0aUxDSnVJam9pYjNCbGNtRjBiM0l0Ym1WM0lpd2lhV1FpT2pGOQ== YWRtaW5fcGFzc3dvcmQ= cmVwbF9wYXNzd29yZA== cm9vdF9wYXNzd29yZA== YmFja3VwX3Bhc3N3b3Jk cHhjLXByZWZpeC0= secrets=WEo0K0hITFVQMDJJN0daTm14RWhESDJGT0liTW9sZ0pYU1d0cmw2Vw== K1N6aDhFYjBJWjBvMjV0SXZBei9yN1NzZG1EeWN0M09ENUNMMzIzblZvbFpZdlorWGV3ZFkxM2ZYRW9HM2pHc1o3L05uSWovdTl2ZStBU3R5TEtRa2c9PQo= VEtVS0N0dkFZRC9uR1p3dEYxa0hBOFFuTXpoNlFHMFlrUlZJK0ZLSQ== ZjErZUEzNTNvUFcsOTxiST8hNw== b3BlcmF0b3JhZG1pbg== YWRtaW4= ZXlKcklqb2llVkoxU1VKeWMwdHVja3hZVmpBNE1HTTBRMXBpWVcxT04yeDVlbkZZTTA0aUxDSnVJam9pYjNCbGNtRjBiM0l0Ym1WM0lpd2lhV1FpT2pGOQ== YWRtaW5fcGFzc3dvcmQ= cmVwbF9wYXNzd29yZA== cm9vdF9wYXNzd29yZA== YmFja3VwX3Bhc3N3b3Jk c29tZS1zZWNyZXQta2V5 ZjErZUEzNTNvUFcsOTxiST8hNw== b3BlcmF0b3JhZG1pbg== YWRtaW4= ZXlKcklqb2llVkoxU1VKeWMwdHVja3hZVmpBNE1HTTBRMXBpWVcxT04yeDVlbkZZTTA0aUxDSnVJam9pYjNCbGNtRjBiM0l0Ym1WM0lpd2lhV1FpT2pGOQ== YWRtaW5fcGFzc3dvcmQ= cmVwbF9wYXNzd29yZA== cm9vdF9wYXNzd29yZA== YmFja3VwX3Bhc3N3b3Jk cHhjLXByZWZpeC0= ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ base64 -d ++ echo ++ for i in '$secrets' ++ 
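# check_passwords_leak above pulls every secret value that is not a
# certificate, key, or cloud-credential field, base64-decodes each one, and
# then greps container logs for the decoded strings. A condensed sketch of the
# collection and decode steps (the jq filter is shortened here; the
# log-grepping part is not reproduced):
secrets=$(kubectl get secrets -o json | jq -r \
    '.items[].data | to_entries | .[]
     | select(.key | (endswith(".crt") or endswith(".key") or endswith(".pub")
                      or endswith(".pem") or endswith(".p12")) | not)
     | .value')
passwords=$(for s in $secrets; do echo "$s" | base64 -d; echo; done)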
base64 -d ++ echo + passwords='XJ4+HHLUP02I7GZNmxEhDH2FOIbMolgJXSWtrl6W +Szh8Eb0IZ0o25tIvAz/r7SsdmDyct3OD5CL323nVolZYvZ+XewdY13fXEoG3jGsZ7/NnIj/u9ve+AStyLKQkg== TKUKCtvAYD/nGZwtF1kHA8QnMzh6QG0YkRVI+FKI f1+eA353oPW,910.214.186.7:33062: read: connection reset by peer sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2 + kubectl get pxc --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl patch pxc -n monitoring-2-0-6105 monitoring --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/monitoring patched (no change) + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.3ujGTjkgfv ++ mktemp + local LAST_ERR=/tmp/tmp.pQN7o8bwbL + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.3ujGTjkgfv perconaxtradbcluster.pxc.percona.com "monitoring" deleted + cat /tmp/tmp.pQN7o8bwbL + rm /tmp/tmp.3ujGTjkgfv /tmp/tmp.pQN7o8bwbL + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.1hiRSTH23c ++ mktemp + local LAST_ERR=/tmp/tmp.TqKXzLVKCv + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.1hiRSTH23c No resources found + cat /tmp/tmp.TqKXzLVKCv + rm /tmp/tmp.1hiRSTH23c /tmp/tmp.TqKXzLVKCv + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.ewP6hBJXda ++ mktemp + local LAST_ERR=/tmp/tmp.p0bMbp07yY + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ewP6hBJXda No resources found + cat /tmp/tmp.p0bMbp07yY + rm /tmp/tmp.ewP6hBJXda /tmp/tmp.p0bMbp07yY + return 0 + kubectl_bin delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook ++ mktemp + local LAST_OUT=/tmp/tmp.bMLDD5Ryfk ++ mktemp + local LAST_ERR=/tmp/tmp.U8PFZwyQFT + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.bMLDD5Ryfk validatingwebhookconfiguration.admissionregistration.k8s.io "percona-xtradbcluster-webhook" deleted + cat /tmp/tmp.U8PFZwyQFT + rm /tmp/tmp.bMLDD5Ryfk /tmp/tmp.U8PFZwyQFT + return 0 + kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v1.14.2/cert-manager.yaml + : + '[' '!' 
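# The teardown above first drops the finalizers from every pxc object so that
# deletion cannot hang on a finalizer, then removes the custom resources, the
# validating webhook, and cert-manager. A condensed restatement of the
# commands traced above:
kubectl get pxc --all-namespaces -o wide | grep -v NAMESPACE \
    | xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'
kubectl delete pxc --all --all-namespaces
kubectl delete pxc-backup --all --all-namespaces
kubectl delete pxc-restore --all --all-namespaces
kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook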
-z '' ']' + '[' -n pxc-operator ']' + rm -rf /tmp/tmp.sCyU0e1yt2 + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator + kubectl_bin delete --grace-period=0 --force=true namespace monitoring-2-0-6105 ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.kJejL0mKqJ + local LAST_OUT=/tmp/tmp.AIds6HkIsc ++ mktemp + local LAST_ERR=/tmp/tmp.Z9hBXxbX5P + local exit_status=0 ++ mktemp + local LAST_ERR=/tmp/tmp.DOGUfa1zR4 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace pxc-operator ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace monitoring-2-0-6105
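# The run finishes by force-deleting both namespaces; --grace-period=0 together
# with --force skips graceful pod termination. The interleaved trace suggests
# the two deletions run in parallel, which a sketch like this reproduces:
kubectl delete --grace-period=0 --force=true namespace pxc-operator &
kubectl delete --grace-period=0 --force=true namespace monitoring-2-0-6105 &
wait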