Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/logs/monitoring-2-0-8-0.log Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 + cluster=monitoring + create_infra monitoring-2-0-15296 + local ns=monitoring-2-0-15296 + '[' -n pxc-operator ']' + grep -v NAMESPACE + kubectl get pxc --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n monitoring-2-0-8388 monitoring --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/monitoring patched (no change) + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.OOSnHzOzef ++ mktemp + local LAST_ERR=/tmp/tmp.qJoE63zgUc + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.OOSnHzOzef perconaxtradbcluster.pxc.percona.com "monitoring" deleted from monitoring-2-0-8388 namespace + cat /tmp/tmp.qJoE63zgUc + rm /tmp/tmp.OOSnHzOzef /tmp/tmp.qJoE63zgUc + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.RdGngsHyad ++ mktemp + local LAST_ERR=/tmp/tmp.FjCBxzE4LJ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.RdGngsHyad No resources found + cat /tmp/tmp.FjCBxzE4LJ + rm /tmp/tmp.RdGngsHyad /tmp/tmp.FjCBxzE4LJ + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.lfZv2w8ZqQ ++ mktemp + local LAST_ERR=/tmp/tmp.oT5ZroDAIu + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.lfZv2w8ZqQ No resources found + cat /tmp/tmp.oT5ZroDAIu + rm /tmp/tmp.lfZv2w8ZqQ /tmp/tmp.oT5ZroDAIu + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk 
'{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + xargs kubectl delete ns + kubectl_bin delete namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.pEFCevHa7I + awk '{print$1}' ++ mktemp + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + local LAST_ERR=/tmp/tmp.HHhUAabf45 + local exit_status=0 ++ seq 0 2 ++ mktemp + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + local LAST_OUT=/tmp/tmp.UxYjHVBGVk ++ mktemp + local LAST_ERR=/tmp/tmp.MmAXtRIsCc + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.UxYjHVBGVk + cat /tmp/tmp.MmAXtRIsCc + rm /tmp/tmp.UxYjHVBGVk /tmp/tmp.MmAXtRIsCc + return 0 namespace "monitoring-2-0-8388" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.pEFCevHa7I namespace "pxc-operator" deleted + cat /tmp/tmp.HHhUAabf45 + rm /tmp/tmp.pEFCevHa7I /tmp/tmp.HHhUAabf45 + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.hbysoWFP0z ++ mktemp + local LAST_ERR=/tmp/tmp.jE0rDunkRu + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.hbysoWFP0z namespace/pxc-operator created + cat /tmp/tmp.jE0rDunkRu + rm /tmp/tmp.hbysoWFP0z /tmp/tmp.jE0rDunkRu + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.8lkdiL1dF4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.3aTkoqbcMS ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.8lkdiL1dF4 ++ cat /tmp/tmp.3aTkoqbcMS ++ rm /tmp/tmp.8lkdiL1dF4 /tmp/tmp.3aTkoqbcMS ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2154-7a623b10-7-cluster8 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.WgIC6KHgbS ++ mktemp + local LAST_ERR=/tmp/tmp.bZesmwCz2f + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2154-7a623b10-7-cluster8 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.WgIC6KHgbS Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2154-7a623b10-7-cluster8" 
modified. + cat /tmp/tmp.bZesmwCz2f + rm /tmp/tmp.WgIC6KHgbS /tmp/tmp.bZesmwCz2f + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.xFR0zsoTGH ++ mktemp + local LAST_ERR=/tmp/tmp.d3pVoL4CEM + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.xFR0zsoTGH customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.d3pVoL4CEM + rm /tmp/tmp.xFR0zsoTGH /tmp/tmp.d3pVoL4CEM + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.3oaVNRU17J ++ mktemp + local LAST_ERR=/tmp/tmp.uHZ4UK3rna + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.3oaVNRU17J clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.uHZ4UK3rna + rm /tmp/tmp.3oaVNRU17J /tmp/tmp.uHZ4UK3rna + return 0 + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2154-7a623b10^' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/deploy/cw-operator.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.ssjzomHFBF ++ mktemp + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + local LAST_ERR=/tmp/tmp.OASfFTm7dQ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ssjzomHFBF deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.OASfFTm7dQ + rm /tmp/tmp.ssjzomHFBF /tmp/tmp.OASfFTm7dQ + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + local LAST_OUT=/tmp/tmp.cvFKI0mzEM ++ mktemp + local LAST_ERR=/tmp/tmp.jtUqviYKsJ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait 
--for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.cvFKI0mzEM pod/percona-xtradb-cluster-operator-6cf85965f9-ldkj4 condition met + cat /tmp/tmp.jtUqviYKsJ + rm /tmp/tmp.cvFKI0mzEM /tmp/tmp.jtUqviYKsJ + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.LlXD9Rrr1x +++ mktemp ++ local LAST_ERR=/tmp/tmp.sMkR2wHUcw ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.LlXD9Rrr1x ++ cat /tmp/tmp.sMkR2wHUcw ++ rm /tmp/tmp.LlXD9Rrr1x /tmp/tmp.sMkR2wHUcw ++ return 0 + wait_pod percona-xtradb-cluster-operator-6cf85965f9-ldkj4 480 pxc-operator + local pod=percona-xtradb-cluster-operator-6cf85965f9-ldkj4 + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-6cf85965f9-ldkj4 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-6cf85965f9-ldkj4 condition met waiting for pod/percona-xtradb-cluster-operator-6cf85965f9-ldkj4 to become Ready.Ok + sleep 3 + create_namespace monitoring-2-0-15296 + local namespace=monitoring-2-0-15296 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces 
----------------------------------------------------------------------------------- + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces monitoring-2-0-15296' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces monitoring-2-0-15296 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace monitoring-2-0-15296 + kubectl_bin get ns ++ mktemp + local LAST_OUT=/tmp/tmp.MHrXTwJBX4 ++ mktemp + local LAST_ERR=/tmp/tmp.rSR8miR0bq + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + xargs kubectl delete ns ++ mktemp + local LAST_OUT=/tmp/tmp.PlL6ZM7gSA ++ mktemp + local LAST_ERR=/tmp/tmp.FPU7YuF49k + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-2-0-15296 + awk '{print$1}' + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-2-0-15296 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.MHrXTwJBX4 + cat /tmp/tmp.rSR8miR0bq + rm /tmp/tmp.MHrXTwJBX4 /tmp/tmp.rSR8miR0bq + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-2-0-15296 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.PlL6ZM7gSA + cat /tmp/tmp.FPU7YuF49k Error from server (NotFound): namespaces "monitoring-2-0-15296" not found + rm /tmp/tmp.PlL6ZM7gSA /tmp/tmp.FPU7YuF49k + return 1 + : + wait_for_delete namespace/monitoring-2-0-15296 + local res=namespace/monitoring-2-0-15296 + echo -n 'waiting for namespace/monitoring-2-0-15296 to be deleted' waiting for namespace/monitoring-2-0-15296 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "monitoring-2-0-15296" not found + desc 'create namespace monitoring-2-0-15296' + set +o xtrace ----------------------------------------------------------------------------------- create namespace monitoring-2-0-15296 ----------------------------------------------------------------------------------- + kubectl_bin create namespace monitoring-2-0-15296 ++ mktemp + local LAST_OUT=/tmp/tmp.j1zv9Mz9Oa ++ mktemp + local LAST_ERR=/tmp/tmp.EcpVEPA2vL + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace monitoring-2-0-15296 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.j1zv9Mz9Oa namespace/monitoring-2-0-15296 created + cat /tmp/tmp.EcpVEPA2vL + rm /tmp/tmp.j1zv9Mz9Oa /tmp/tmp.EcpVEPA2vL + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.Vg63lMYAAS +++ mktemp ++ local LAST_ERR=/tmp/tmp.QFjMgtNU9i ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Vg63lMYAAS ++ cat /tmp/tmp.QFjMgtNU9i ++ rm /tmp/tmp.Vg63lMYAAS /tmp/tmp.QFjMgtNU9i ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2154-7a623b10-7-cluster8 --namespace=monitoring-2-0-15296 ++ mktemp + local LAST_OUT=/tmp/tmp.aDxqi0l5II ++ mktemp + local LAST_ERR=/tmp/tmp.QazGGXh4eo + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context 
gke_cloud-dev-112233_us-central1-a_jen-pxc-2154-7a623b10-7-cluster8 --namespace=monitoring-2-0-15296 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.aDxqi0l5II Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2154-7a623b10-7-cluster8" modified. + cat /tmp/tmp.QazGGXh4eo + rm /tmp/tmp.aDxqi0l5II /tmp/tmp.QazGGXh4eo + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.syJCMUANiX ++ mktemp + local LAST_ERR=/tmp/tmp.cOOGfMCPJA + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.syJCMUANiX secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.cOOGfMCPJA + rm /tmp/tmp.syJCMUANiX /tmp/tmp.cOOGfMCPJA + return 0 + deploy_helm monitoring-2-0-15296 + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" already exists with the same configuration, skipping + helm repo add minio https://charts.min.io/ "minio" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "chaos-mesh" chart repository ...Successfully got an update from the "percona" chart repository ...Successfully got an update from the "hashicorp" chart repository Update Complete. ⎈Happy Helming!⎈ + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + deploy_pmm_server + '[' '!' -z '' ']' + helm repo add percona https://percona.github.io/percona-helm-charts/ "percona" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "chaos-mesh" chart repository ...Successfully got an update from the "percona" chart repository ...Successfully got an update from the "hashicorp" chart repository Update Complete. 
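The helm bootstrap above is deliberately idempotent: re-adding a repo that already exists with the same configuration is a no-op ("already exists with the same configuration, skipping"), so the harness can run it on every test invocation. A minimal sketch of the pattern, assuming helm 3 on PATH; the function name is illustrative, not from the harness:

# Register the chart repositories the suite relies on, then refresh their
# indexes. helm 3 skips a repo that is already present with an identical URL,
# so this is safe to call repeatedly from CI.
ensure_helm_repos() {
  helm repo add hashicorp https://helm.releases.hashicorp.com
  helm repo add minio https://charts.min.io/
  helm repo add percona https://percona.github.io/percona-helm-charts/
  helm repo update
}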
⎈Happy Helming!⎈ + helm uninstall monitoring Error: uninstall: Release not loaded: monitoring: release: not found + : + retry 10 60 helm install monitoring --set imageRepo=perconalab/pmm-server --set imageTag=dev-latest https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz + local max=10 + local delay=60 + shift 2 + local n=1 + helm install monitoring --set imageRepo=perconalab/pmm-server --set imageTag=dev-latest https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz NAME: monitoring LAST DEPLOYED: Wed Nov 19 17:29:53 2025 NAMESPACE: monitoring-2-0-15296 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster: endpoint: https://monitoring-service.monitoring-2-0-15296.svc.cluster.local:443 login: admin password: admin + kubectl wait pod monitoring-0 --for=condition=Ready --timeout=420s pod/monitoring-0 condition met + kubectl_bin wait --for=condition=Ready pod/monitoring-0 --timeout=120s ++ mktemp + local LAST_OUT=/tmp/tmp.xHL2Zt9b0k ++ mktemp + local LAST_ERR=/tmp/tmp.f3eamkXsIm + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod/monitoring-0 --timeout=120s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.xHL2Zt9b0k pod/monitoring-0 condition met + cat /tmp/tmp.f3eamkXsIm + rm /tmp/tmp.xHL2Zt9b0k /tmp/tmp.f3eamkXsIm + return 0 + kubectl_bin exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' ++ mktemp + local LAST_OUT=/tmp/tmp.eWSB3lOx1b ++ mktemp + local LAST_ERR=/tmp/tmp.JsSm1EobTP + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.eWSB3lOx1b + cat /tmp/tmp.JsSm1EobTP + rm /tmp/tmp.eWSB3lOx1b /tmp/tmp.JsSm1EobTP + return 0 ++ kubectl_bin exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2' +++ mktemp ++ local LAST_OUT=/tmp/tmp.oUGc0tHzll +++ mktemp ++ local LAST_ERR=/tmp/tmp.GVHk7382PZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.oUGc0tHzll ++ cat /tmp/tmp.GVHk7382PZ ++ rm /tmp/tmp.oUGc0tHzll /tmp/tmp.GVHk7382PZ ++ return 0 + ADMIN_PASSWORD=admin + sleep 5 + kubectl_bin exec monitoring-0 -- bash -c 'grafana-cli --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini admin reset-admin-password admin' ++ mktemp + local LAST_OUT=/tmp/tmp.u85iODK76r ++ mktemp + local LAST_ERR=/tmp/tmp.AWa6oLGYYd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'grafana-cli --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini admin reset-admin-password admin' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.u85iODK76r logger=settings t=2025-11-19T17:30:15.420541354Z level=info msg="Starting Grafana" version= commit= branch= compiled=1970-01-01T00:00:00Z logger=settings t=2025-11-19T17:30:15.420672044Z level=info msg="Config loaded from" file=/usr/share/grafana/conf/defaults.ini logger=settings t=2025-11-19T17:30:15.420683194Z level=info msg="Config loaded from" file=/etc/grafana/grafana.ini logger=settings t=2025-11-19T17:30:15.420689104Z level=info msg="Path Home" path=/usr/share/grafana 
logger=settings t=2025-11-19T17:30:15.420694204Z level=info msg="Path Data" path=/srv/grafana logger=settings t=2025-11-19T17:30:15.420699284Z level=info msg="Path Logs" path=/srv/logs logger=settings t=2025-11-19T17:30:15.420704794Z level=info msg="Path Plugins" path=/srv/grafana/plugins logger=settings t=2025-11-19T17:30:15.420709704Z level=info msg="Path Provisioning" path=/usr/share/grafana/conf/provisioning logger=settings t=2025-11-19T17:30:15.420714574Z level=info msg="App mode production" logger=sqlstore t=2025-11-19T17:30:15.420776794Z level=info msg="Connecting to DB" dbtype=postgres logger=migrator t=2025-11-19T17:30:15.434409903Z level=info msg="Starting DB migrations" logger=migrator t=2025-11-19T17:30:15.439813423Z level=info msg="migrations completed" performed=0 skipped=452 duration=430.62µs logger=secrets t=2025-11-19T17:30:15.441204193Z level=info msg="Envelope encryption state" enabled=true currentprovider=secretKey.v1 logger=plugin.finder t=2025-11-19T17:30:15.47491685Z level=warn msg="Skipping finding plugins as directory does not exist" path=/usr/share/grafana/plugins-bundled logger=plugin.signature.validator t=2025-11-19T17:30:15.591284121Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=grafana-polystat-panel pluginDir=/srv/grafana/plugins/grafana-polystat-panel logger=plugin.signature.validator t=2025-11-19T17:30:15.591309871Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-pt-summary-datasource pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-pt-summary/datasource logger=plugin.signature.validator t=2025-11-19T17:30:15.591348581Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-pt-summary-panel pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-pt-summary/panel logger=plugin.signature.validator t=2025-11-19T17:30:15.591367231Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-qan-app-panel pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-qan logger=plugin.signature.validator t=2025-11-19T17:30:15.591382431Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-app pluginDir=/srv/grafana/plugins/pmm-app/dist logger=plugin.pmm-app t=2025-11-19T17:30:15.596845471Z level=warn msg="Included dashboard is missing a UID field" logger=plugin.loader t=2025-11-19T17:30:15.597116641Z level=info msg="Plugin registered" pluginID=grafana-piechart-panel logger=plugin.loader t=2025-11-19T17:30:15.597129061Z level=info msg="Plugin registered" pluginID=camptocamp-prometheus-alertmanager-datasource logger=plugin.loader t=2025-11-19T17:30:15.597134721Z level=info msg="Plugin registered" pluginID=grafana-clickhouse-datasource logger=plugin.grafana-clickhouse-datasource t=2025-11-19T17:30:15.60743071Z level=warn msg="Plugin process is running with elevated privileges. 
This is not recommended" logger=plugin.loader t=2025-11-19T17:30:15.60745207Z level=info msg="Plugin registered" pluginID=grafana-polystat-panel logger=plugin.loader t=2025-11-19T17:30:15.60746048Z level=info msg="Plugin registered" pluginID=pmm-pt-summary-datasource logger=plugin.loader t=2025-11-19T17:30:15.60746633Z level=info msg="Plugin registered" pluginID=grafana-worldmap-panel logger=plugin.loader t=2025-11-19T17:30:15.60747257Z level=info msg="Plugin registered" pluginID=jdbranham-diagram-panel logger=plugin.loader t=2025-11-19T17:30:15.60747778Z level=info msg="Plugin registered" pluginID=natel-discrete-panel logger=plugin.loader t=2025-11-19T17:30:15.60748376Z level=info msg="Plugin registered" pluginID=pmm-pt-summary-panel logger=plugin.loader t=2025-11-19T17:30:15.60749198Z level=info msg="Plugin registered" pluginID=pmm-qan-app-panel logger=plugin.loader t=2025-11-19T17:30:15.60751074Z level=info msg="Plugin registered" pluginID=petrslavotinek-carpetplot-panel logger=plugin.loader t=2025-11-19T17:30:15.60751679Z level=info msg="Plugin registered" pluginID=pmm-app Admin password changed successfully ✔ + cat /tmp/tmp.AWa6oLGYYd + rm /tmp/tmp.u85iODK76r /tmp/tmp.AWa6oLGYYd + return 0 + desc 'create PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PXC cluster ----------------------------------------------------------------------------------- + spinup_pxc monitoring /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/conf/monitoring.yml 3 120 /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/conf/secrets.yaml + local cluster=monitoring + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/conf/monitoring.yml + local size=3 + local sleep=120 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/conf/secrets.yaml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/conf/secrets.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.QXhKu8ru9v ++ mktemp + local LAST_ERR=/tmp/tmp.OLZmelNF69 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/conf/secrets.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.QXhKu8ru9v secret/my-cluster-secrets created + cat /tmp/tmp.OLZmelNF69 + rm /tmp/tmp.QXhKu8ru9v /tmp/tmp.OLZmelNF69 + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/conf/client.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2154-7a623b10#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + 
/usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.monitoring-2-0-15296~ ++ mktemp + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + local LAST_OUT=/tmp/tmp.idjOTibZEK ++ mktemp + local LAST_ERR=/tmp/tmp.5oWuObgQQY + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.idjOTibZEK deployment.apps/pxc-client created + cat /tmp/tmp.5oWuObgQQY + rm /tmp/tmp.idjOTibZEK /tmp/tmp.5oWuObgQQY + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/conf/monitoring.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/conf/monitoring.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/conf/monitoring.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' ++ mktemp + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2154-7a623b10#' + local LAST_OUT=/tmp/tmp.H1kzzDUFKr + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' ++ mktemp + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.monitoring-2-0-15296~ + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + local LAST_ERR=/tmp/tmp.5hBs9xgVhX + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.H1kzzDUFKr perconaxtradbcluster.pxc.percona.com/monitoring created + cat /tmp/tmp.5hBs9xgVhX + rm /tmp/tmp.H1kzzDUFKr /tmp/tmp.5hBs9xgVhX + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy monitoring ++ local target_cluster=monitoring +++ kubectl_bin get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.mmjNseKhj6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MeY5kHMjQN +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ 
kubectl get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.mmjNseKhj6 +++ cat /tmp/tmp.MeY5kHMjQN +++ rm /tmp/tmp.mmjNseKhj6 /tmp/tmp.MeY5kHMjQN +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo monitoring-haproxy ++ return + local proxy=monitoring-haproxy + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-2-0-15296 ++ mktemp + local LAST_OUT=/tmp/tmp.lvi0I4JNiy ++ mktemp + local LAST_ERR=/tmp/tmp.098k8tLmuj + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-2-0-15296 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-2-0-15296 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.lvi0I4JNiy pod/monitoring-haproxy-0 condition met pod/monitoring-pxc-0 condition met + cat /tmp/tmp.098k8tLmuj + rm /tmp/tmp.lvi0I4JNiy /tmp/tmp.098k8tLmuj + return 0 + wait_for_running monitoring-haproxy 1 + local name=monitoring-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-haproxy-0 480 + local pod=monitoring-haproxy-0 + local max_retry=480 + local ns= ++ echo monitoring-haproxy-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/monitoring-haproxy-0 condition met waiting for pod/monitoring-haproxy-0 to become Ready.Ok + wait_for_running monitoring-pxc 3 + local name=monitoring-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-0 480 + local pod=monitoring-pxc-0 + local max_retry=480 + local ns= ++ echo monitoring-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/monitoring-pxc-0 condition met waiting for pod/monitoring-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-1 480 + local pod=monitoring-pxc-1 + local max_retry=480 + local ns= ++ echo monitoring-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/monitoring-pxc-1 condition met waiting for pod/monitoring-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-2 480 + local pod=monitoring-pxc-2 + local max_retry=480 + local ns= ++ echo monitoring-pxc-2 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/monitoring-pxc-2 condition met waiting for pod/monitoring-pxc-2 to become 
Ready.Ok + sleep 120 ++ kubectl get pxc monitoring -o 'jsonpath={.spec.secretsName}' + local secret_name=my-cluster-secrets ++ getSecretData my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.vr8LgGmCmT +++ mktemp ++ local LAST_ERR=/tmp/tmp.aK2BzJgjXh ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.vr8LgGmCmT ++ cat /tmp/tmp.aK2BzJgjXh ++ rm /tmp/tmp.vr8LgGmCmT /tmp/tmp.aK2BzJgjXh ++ return 0 + local root_pass=root_password + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.I6oqD0laRZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.6Ptua8oAsl ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.I6oqD0laRZ ++ cat /tmp/tmp.6Ptua8oAsl ++ rm /tmp/tmp.I6oqD0laRZ /tmp/tmp.6Ptua8oAsl ++ return 0 + client_pod=pxc-client-59944c5bbf-jp4vm + wait_pod pxc-client-59944c5bbf-jp4vm + local pod=pxc-client-59944c5bbf-jp4vm + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-jp4vm ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-jp4vm condition met waiting for pod/pxc-client-59944c5bbf-jp4vm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JgZLExjd8D +++ mktemp ++ local LAST_ERR=/tmp/tmp.4sycN4Yomi ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.JgZLExjd8D ++ cat /tmp/tmp.4sycN4Yomi ++ rm /tmp/tmp.JgZLExjd8D /tmp/tmp.4sycN4Yomi ++ return 0 + client_pod=pxc-client-59944c5bbf-jp4vm + wait_pod pxc-client-59944c5bbf-jp4vm + local pod=pxc-client-59944c5bbf-jp4vm + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-jp4vm ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace 
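Two helpers carry this step: getSecretData extracts one key from the cluster Secret and base64-decodes it, exactly as traced above, while run_mysql's actual client invocation is hidden behind 'set +o xtrace'. A minimal sketch of both; the mysql flags are an assumption about what the suppressed call boils down to, not the harness's verbatim code:

# Read one key from a Kubernetes Secret and decode it (mirrors the trace above).
get_secret_data() {
  local secret=$1 key=$2
  kubectl get "secrets/${secret}" --template="{{.data.${key}}}" | base64 --decode
}

# Run a statement through the pxc-client pod. The real run_mysql suppresses its
# trace, so assume it reduces to mysql -e plus the connection flags seen in the
# log, e.g. -h monitoring-haproxy -uroot -p"$root_pass" -P3306.
run_sql() {
  local pod=$1 sql=$2; shift 2
  kubectl exec "$pod" -c pxc-client -- mysql -sN -e "$sql" "$@"
}

# Usage, with values taken from this run:
# root_pass=$(get_secret_data my-cluster-secrets root)
# run_sql pxc-client-59944c5bbf-jp4vm 'INSERT myApp.myApp (id) VALUES (100500)' \
#   -h monitoring-haproxy -uroot -p"$root_pass" -P3306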
pod/pxc-client-59944c5bbf-jp4vm condition met waiting for pod/pxc-client-59944c5bbf-jp4vm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NntTFhkcbb +++ mktemp ++ local LAST_ERR=/tmp/tmp.Ns53MBjAeY ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.NntTFhkcbb ++ cat /tmp/tmp.Ns53MBjAeY ++ rm /tmp/tmp.NntTFhkcbb /tmp/tmp.Ns53MBjAeY ++ return 0 + client_pod=pxc-client-59944c5bbf-jp4vm + wait_pod pxc-client-59944c5bbf-jp4vm + local pod=pxc-client-59944c5bbf-jp4vm + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-jp4vm ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-jp4vm condition met waiting for pod/pxc-client-59944c5bbf-jp4vm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.HXYCfGXVn6/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/select-1.sql /tmp/tmp.HXYCfGXVn6/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OzvNndG3JW +++ mktemp ++ local LAST_ERR=/tmp/tmp.i2uX0rsKUX ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.OzvNndG3JW ++ cat /tmp/tmp.i2uX0rsKUX ++ rm /tmp/tmp.OzvNndG3JW /tmp/tmp.i2uX0rsKUX ++ return 0 + client_pod=pxc-client-59944c5bbf-jp4vm + wait_pod pxc-client-59944c5bbf-jp4vm + local pod=pxc-client-59944c5bbf-jp4vm + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-jp4vm ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-jp4vm condition met waiting for pod/pxc-client-59944c5bbf-jp4vm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.HXYCfGXVn6/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/select-1.sql /tmp/tmp.HXYCfGXVn6/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.jbZ3oTglp7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.4Pf1jMgche ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.jbZ3oTglp7 ++ cat /tmp/tmp.4Pf1jMgche ++ rm /tmp/tmp.jbZ3oTglp7 /tmp/tmp.4Pf1jMgche ++ return 0 + client_pod=pxc-client-59944c5bbf-jp4vm + wait_pod pxc-client-59944c5bbf-jp4vm + local pod=pxc-client-59944c5bbf-jp4vm + local max_retry=480 + local ns= ++ echo pxc-client-59944c5bbf-jp4vm ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-59944c5bbf-jp4vm condition met waiting for pod/pxc-client-59944c5bbf-jp4vm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.HXYCfGXVn6/select-1.sql ']' + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/select-1.sql /tmp/tmp.HXYCfGXVn6/select-1.sql ++ is_keyring_plugin_in_use monitoring ++ local cluster=monitoring ++ kubectl_bin exec -it monitoring-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ grep -E -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZQHWzB6JJG +++ mktemp ++ local LAST_ERR=/tmp/tmp.8XssZCcQ3Y ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it monitoring-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ZQHWzB6JJG ++ cat /tmp/tmp.8XssZCcQ3Y Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.ZQHWzB6JJG /tmp/tmp.8XssZCcQ3Y ++ return 0 + '[' '' ']' + desc 'add PMM API key to secret' + set +o xtrace ----------------------------------------------------------------------------------- add PMM API key to secret ----------------------------------------------------------------------------------- ++ jq .key +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ local LAST_OUT=/tmp/tmp.iNurE0eK55 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.eJR72hyMBR ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.iNurE0eK55 ++++ cat /tmp/tmp.eJR72hyMBR ++++ rm /tmp/tmp.iNurE0eK55 /tmp/tmp.eJR72hyMBR ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.hmij2HKbKh +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.tViMn3KAke ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.hmij2HKbKh ++++ cat /tmp/tmp.tViMn3KAke ++++ rm /tmp/tmp.hmij2HKbKh /tmp/tmp.tViMn3KAke ++++ return 0 +++ local ip=34.29.75.64 +++ '[' -n 34.29.75.64 -a 34.29.75.64 '!=' null ']' +++ echo 34.29.75.64 +++ return ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@34.29.75.64/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 36 0 0 100 36 0 83 --:--:-- --:--:-- --:--:-- 83 100 155 100 119 100 36 269 81 --:--:-- --:--:-- --:--:-- 349 + API_KEY='"eyJrIjoiMWJhazhCN0QwRUlxeHd5cDdPZXdSYnhmSEpzbkJmUWMiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="' + kubectl_bin patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoiMWJhazhCN0QwRUlxeHd5cDdPZXdSYnhmSEpzbkJmUWMiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' ++ mktemp + local LAST_OUT=/tmp/tmp.HomJc5Ef0m ++ mktemp + local LAST_ERR=/tmp/tmp.J6atBO4VMN + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoiMWJhazhCN0QwRUlxeHd5cDdPZXdSYnhmSEpzbkJmUWMiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' + 
exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.HomJc5Ef0m secret/my-cluster-secrets patched + cat /tmp/tmp.J6atBO4VMN + rm /tmp/tmp.HomJc5Ef0m /tmp/tmp.J6atBO4VMN + return 0 + wait_for_generation sts/monitoring-pxc 2 + local resource=sts/monitoring-pxc + local target_generation=2 + echo 'Waiting for sts/monitoring-pxc to reach generation 2...' Waiting for sts/monitoring-pxc to reach generation 2... + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=1 + '[' 1 -eq 2 ']' + echo 'Resource sts/monitoring-pxc is at generation 1. Waiting...' Resource sts/monitoring-pxc is at generation 1. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 2 ']' + echo 'Resource sts/monitoring-pxc has reached generation 2.' Resource sts/monitoring-pxc has reached generation 2. + break + wait_for_generation sts/monitoring-haproxy 2 + local resource=sts/monitoring-haproxy + local target_generation=2 + echo 'Waiting for sts/monitoring-haproxy to reach generation 2...' Waiting for sts/monitoring-haproxy to reach generation 2... + true ++ kubectl get sts/monitoring-haproxy -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 2 ']' + echo 'Resource sts/monitoring-haproxy has reached generation 2.' Resource sts/monitoring-haproxy has reached generation 2. + break + sleep 10 + kubectl wait pod -l app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --for=condition=ready --timeout=600s pod/monitoring-haproxy-0 condition met pod/monitoring-haproxy-1 condition met pod/monitoring-pxc-0 condition met pod/monitoring-pxc-1 condition met pod/monitoring-pxc-2 condition met + wait_cluster_consistency monitoring 3 2 + local cluster_name=monitoring + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/monitoring to be ready' waiting for pxc/monitoring to be ready++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZypmXSMU9s +++ mktemp ++ local LAST_ERR=/tmp/tmp.oHGDeqj1HS ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ZypmXSMU9s ++ cat /tmp/tmp.oHGDeqj1HS ++ rm /tmp/tmp.ZypmXSMU9s /tmp/tmp.oHGDeqj1HS ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 0 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gNB2bNoI2C +++ mktemp ++ local LAST_ERR=/tmp/tmp.nQoWL3OmOO ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.gNB2bNoI2C ++ cat /tmp/tmp.nQoWL3OmOO ++ rm /tmp/tmp.gNB2bNoI2C /tmp/tmp.nQoWL3OmOO ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
.+ sleep 5 + [[ 1 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' [12 near-identical polling iterations elided: every 5 seconds the loop re-read {.status.state}, got "initializing", printed a dot, and incremented the counter through i=12] + [[ initializing == \r\e\a\d\y ]] + echo -n . 
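The wait_cluster_consistency loop running here polls .status.state every five seconds, up to 300 iterations, before checking the ready replica counts. A condensed sketch of that logic; the timeout message is an assumption, since this run never hits the limit:

wait_cluster_consistency() {
    local cluster_name=$1
    local cluster_size=$2
    local proxy_size=$3
    local i=0
    local max=300
    sleep 7
    echo -n "waiting for pxc/${cluster_name} to be ready"
    while [[ $(kubectl get pxc "${cluster_name}" -o 'jsonpath={.status.state}') != "ready" ]]; do
        echo -n .
        sleep 5
        if [[ $i -ge $max ]]; then
            echo "pxc/${cluster_name} never reached ready state" >&2
            return 1
        fi
        i=$((i + 1))
    done
    # once ready, the trace also verifies the replica counts reported in .status
    [[ $(kubectl get pxc "${cluster_name}" -o 'jsonpath={.status.pxc.ready}') == "${cluster_size}" ]]
    [[ $(kubectl get pxc "${cluster_name}" -o 'jsonpath={.status.haproxy.ready}') == "${proxy_size}" ]]
}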
.+ sleep 5 + [[ 13 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LrTMg7Aruc +++ mktemp ++ local LAST_ERR=/tmp/tmp.QBQWnU7RKm ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.LrTMg7Aruc ++ cat /tmp/tmp.QBQWnU7RKm ++ rm /tmp/tmp.LrTMg7Aruc /tmp/tmp.QBQWnU7RKm ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 14 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zGUTc1R9sy +++ mktemp ++ local LAST_ERR=/tmp/tmp.EXIl3l8NBu ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.zGUTc1R9sy ++ cat /tmp/tmp.EXIl3l8NBu ++ rm /tmp/tmp.zGUTc1R9sy /tmp/tmp.EXIl3l8NBu ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 15 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.MtJ7eO5hk2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.g4MVcf3Kvi ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.MtJ7eO5hk2 ++ cat /tmp/tmp.g4MVcf3Kvi ++ rm /tmp/tmp.MtJ7eO5hk2 /tmp/tmp.g4MVcf3Kvi ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 16 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8fwwU9cIPM +++ mktemp ++ local LAST_ERR=/tmp/tmp.zuw6KMHtJe ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.8fwwU9cIPM ++ cat /tmp/tmp.zuw6KMHtJe ++ rm /tmp/tmp.8fwwU9cIPM /tmp/tmp.zuw6KMHtJe ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.icGZinH5SJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.g0LdGcPZdd ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.icGZinH5SJ ++ cat /tmp/tmp.g0LdGcPZdd ++ rm /tmp/tmp.icGZinH5SJ /tmp/tmp.g0LdGcPZdd ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine monitoring +++ local cluster_name=monitoring ++++ get_proxy monitoring ++++ local target_cluster=monitoring +++++ kubectl_bin get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.ksw9R7Lbt4 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.JqBPyZ8DqD +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.ksw9R7Lbt4 +++++ cat /tmp/tmp.JqBPyZ8DqD +++++ rm /tmp/tmp.ksw9R7Lbt4 /tmp/tmp.JqBPyZ8DqD +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo monitoring-haproxy ++++ return +++ local cluster_proxy=monitoring-haproxy +++ echo haproxy ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.TMUUxOQXaZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.h3F7rg5Cez ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.TMUUxOQXaZ ++ cat /tmp/tmp.h3F7rg5Cez ++ rm /tmp/tmp.TMUUxOQXaZ /tmp/tmp.h3F7rg5Cez ++ return 0 + [[ 2 == \2 ]] + echo + compare_kubectl statefulset/monitoring-pxc -no-prefix + local resource=statefulset/monitoring-pxc + local postfix=-no-prefix + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix.yml + local new_result=/tmp/tmp.HXYCfGXVn6/statefulset_monitoring-pxc.yml + desc 'compare statefulset/monitoring-pxc--no-prefix' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-pxc--no-prefix ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k129.yml ']' + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k127.yml + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k127-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k127-aks.yml ']' + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. 
| select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-15296", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + kubectl_bin get -o yaml statefulset/monitoring-pxc ++ mktemp + local LAST_OUT=/tmp/tmp.c6YygUJBCQ ++ mktemp + local LAST_ERR=/tmp/tmp.UQBcsEsUNi + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.c6YygUJBCQ + cat /tmp/tmp.UQBcsEsUNi + rm /tmp/tmp.c6YygUJBCQ /tmp/tmp.UQBcsEsUNi + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k127.yml /tmp/tmp.HXYCfGXVn6/statefulset_monitoring-pxc.yml + compare_kubectl statefulset/monitoring-haproxy -no-prefix + local resource=statefulset/monitoring-haproxy + local postfix=-no-prefix + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix.yml + local new_result=/tmp/tmp.HXYCfGXVn6/statefulset_monitoring-haproxy.yml + desc 'compare statefulset/monitoring-haproxy--no-prefix' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-haproxy--no-prefix ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.31 >= 1.33' + '[' 0 -eq 1 ']' + 
return 1 + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k129.yml ']' + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k127-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-haproxy ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-15296", "namespace") | (.. 
| select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.DU7WoTNYmX ++ mktemp + local LAST_ERR=/tmp/tmp.u9HGHt71D3 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.DU7WoTNYmX + cat /tmp/tmp.u9HGHt71D3 + rm /tmp/tmp.DU7WoTNYmX /tmp/tmp.u9HGHt71D3 + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k127.yml /tmp/tmp.HXYCfGXVn6/statefulset_monitoring-haproxy.yml + desc 'apply my-env-var-secrets to add PMM_PREFIX' + set +o xtrace ----------------------------------------------------------------------------------- apply my-env-var-secrets to add PMM_PREFIX ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/conf/envsecrets.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.Bf2pDDLxiT ++ mktemp + local LAST_ERR=/tmp/tmp.3yfZ6FmKf7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/conf/envsecrets.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Bf2pDDLxiT secret/my-env-var-secrets created + cat /tmp/tmp.3yfZ6FmKf7 + rm /tmp/tmp.Bf2pDDLxiT /tmp/tmp.3yfZ6FmKf7 + return 0 + wait_for_generation sts/monitoring-pxc 3 + local resource=sts/monitoring-pxc + local target_generation=3 + echo 'Waiting for sts/monitoring-pxc to reach generation 3...' Waiting for sts/monitoring-pxc to reach generation 3... + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 3 ']' + echo 'Resource sts/monitoring-pxc has reached generation 3.' Resource sts/monitoring-pxc has reached generation 3. + break + wait_for_generation sts/monitoring-haproxy 3 + local resource=sts/monitoring-haproxy + local target_generation=3 + echo 'Waiting for sts/monitoring-haproxy to reach generation 3...' Waiting for sts/monitoring-haproxy to reach generation 3... + true ++ kubectl get sts/monitoring-haproxy -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 3 ']' + echo 'Resource sts/monitoring-haproxy has reached generation 3.' Resource sts/monitoring-haproxy has reached generation 3. 
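Both compare runs above share one pattern: pick the most specific expected file for the detected server version, dump the live object, normalize it with the long yq filter, and diff. A compressed sketch; KUBE_VERSION, COMPARE_DIR, and NORMALIZE_FILTER stand in for values the real script derives elsewhere, and the real helper also checks EKS/AKS/OpenShift and image-version suffixes in a specific precedence order:

# true if the server's Kubernetes version is >= the argument (the trace: echo '1.31 >= 1.29' | bc -l)
version_gt() {
    [ "$(echo "${KUBE_VERSION} >= $1" | bc -l)" -eq 1 ]
}

compare_kubectl() {
    local resource=$1
    local postfix=${2:-}
    local base="${COMPARE_DIR}/$(echo "${resource}" | tr '/' '_')${postfix}"
    local expected="${base}.yml"
    # prefer a version-suffixed expected file when one exists for this server version
    if version_gt 1.29 && [ -f "${base}-k129.yml" ]; then expected="${base}-k129.yml"; fi
    if version_gt 1.27 && [ -f "${base}-k127.yml" ]; then expected="${base}-k127.yml"; fi
    # dump the live object, strip run-specific fields, and diff against the golden file
    kubectl get -o yaml "${resource}" | yq eval "${NORMALIZE_FILTER}" - > /tmp/new.yml
    diff -u "${expected}" /tmp/new.yml
}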
+ break + desc 'add new PMM API key to secret' + set +o xtrace ----------------------------------------------------------------------------------- add new PMM API key to secret ----------------------------------------------------------------------------------- ++ jq .key +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ kubectl_bin get service/monitoring-service -o json ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.EGPvPBzOyA +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.vosCKGAqg9 ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.EGPvPBzOyA ++++ cat /tmp/tmp.vosCKGAqg9 ++++ rm /tmp/tmp.EGPvPBzOyA /tmp/tmp.vosCKGAqg9 ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.2As2GIEMaY +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.pwmL7iJf9O ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.2As2GIEMaY ++++ cat /tmp/tmp.pwmL7iJf9O ++++ rm /tmp/tmp.2As2GIEMaY /tmp/tmp.pwmL7iJf9O ++++ return 0 +++ local ip=34.29.75.64 +++ '[' -n 34.29.75.64 -a 34.29.75.64 '!=' null ']' +++ echo 34.29.75.64 +++ return ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator-new", "role": "Admin"}' https://admin:admin@34.29.75.64/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 167 100 127 100 40 288 90 --:--:-- --:--:-- --:--:-- 379 + API_KEY_NEW='"eyJrIjoiOEN0cTZLTkRWZmgzWTlCdkhyNHRVOW5HWW9Za21sNVoiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9"' + kubectl_bin patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoiOEN0cTZLTkRWZmgzWTlCdkhyNHRVOW5HWW9Za21sNVoiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9"}}' ++ mktemp + local LAST_OUT=/tmp/tmp.rqEDed9F9n ++ mktemp + local LAST_ERR=/tmp/tmp.M7DqgbA79R + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoiOEN0cTZLTkRWZmgzWTlCdkhyNHRVOW5HWW9Za21sNVoiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9"}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.rqEDed9F9n secret/my-cluster-secrets patched + cat /tmp/tmp.M7DqgbA79R + rm /tmp/tmp.rqEDed9F9n /tmp/tmp.M7DqgbA79R + return 0 + desc 'delete old PMM key' + set +o xtrace ----------------------------------------------------------------------------------- delete old PMM key ----------------------------------------------------------------------------------- ++ jq '.[] | select( .name == "operator").id' +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json ++++ sed -e 's/^"//; s/"$//;' ++++ jq '.status.loadBalancer.ingress[].hostname' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.HjVgDpBQJE +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.pJS6mn7gxD ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get 
service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.HjVgDpBQJE ++++ cat /tmp/tmp.pJS6mn7gxD ++++ rm /tmp/tmp.HjVgDpBQJE /tmp/tmp.pJS6mn7gxD ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ jq '.status.loadBalancer.ingress[].ip' ++++ sed -e 's/^"//; s/"$//;' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.vKq42WZoHi +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.6SFPDkZuvQ ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.vKq42WZoHi ++++ cat /tmp/tmp.6SFPDkZuvQ ++++ rm /tmp/tmp.vKq42WZoHi /tmp/tmp.6SFPDkZuvQ ++++ return 0 +++ local ip=34.29.75.64 +++ '[' -n 34.29.75.64 -a 34.29.75.64 '!=' null ']' +++ echo 34.29.75.64 +++ return ++ curl --insecure -X GET https://admin:admin@34.29.75.64/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 602 100 602 0 0 1413 0 --:--:-- --:--:-- --:--:-- 1413 + ID_API_KEY_OLD=6 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.PzDm4dX0SP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uMWP1EjFde +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.PzDm4dX0SP +++ cat /tmp/tmp.uMWP1EjFde +++ rm /tmp/tmp.PzDm4dX0SP /tmp/tmp.uMWP1EjFde +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.NRr2Dh93Tb ++++ mktemp +++ local LAST_ERR=/tmp/tmp.R5BFPAeaP4 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.NRr2Dh93Tb +++ cat /tmp/tmp.R5BFPAeaP4 +++ rm /tmp/tmp.NRr2Dh93Tb /tmp/tmp.R5BFPAeaP4 +++ return 0 ++ local ip=34.29.75.64 ++ '[' -n 34.29.75.64 -a 34.29.75.64 '!=' null ']' ++ echo 34.29.75.64 ++ return + curl --insecure -X DELETE https://admin:admin@34.29.75.64/graph/api/auth/keys/6 % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 29 100 29 0 0 67 0 --:--:-- --:--:-- --:--:-- 67 {"message":"API key deleted"}+ wait_for_generation sts/monitoring-pxc 4 + local resource=sts/monitoring-pxc + local target_generation=4 + echo 'Waiting for sts/monitoring-pxc to reach generation 4...' Waiting for sts/monitoring-pxc to reach generation 4... + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... 
+ sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' [roughly two dozen identical wait iterations elided: the loop re-read the generation every 5 seconds and sts/monitoring-pxc stayed at generation 3] + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=4 + '[' 4 -eq 4 ']' + echo 'Resource sts/monitoring-pxc has reached generation 4.' Resource sts/monitoring-pxc has reached generation 4. + break + wait_for_generation sts/monitoring-haproxy 4 + local resource=sts/monitoring-haproxy + local target_generation=4 + echo 'Waiting for sts/monitoring-haproxy to reach generation 4...' Waiting for sts/monitoring-haproxy to reach generation 4... + true ++ kubectl get sts/monitoring-haproxy -o 'jsonpath={.metadata.generation}' + current_generation=4 + '[' 4 -eq 4 ']' + echo 'Resource sts/monitoring-haproxy has reached generation 4.' Resource sts/monitoring-haproxy has reached generation 4. 
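Stripped of the retry wrapper and temp files, the key rotation that triggered this rollout is four calls against the PMM server's Grafana API. A condensed equivalent of the traced steps; jq -r replaces the log's sed quote-stripping:

# resolve the LoadBalancer endpoint of monitoring-service (hostname first, IP as fallback)
get_service_endpoint() {
    local service=$1
    local hostname
    hostname=$(kubectl get "service/${service}" -o json | jq -r '.status.loadBalancer.ingress[].hostname')
    if [ -n "${hostname}" ] && [ "${hostname}" != "null" ]; then
        echo "${hostname}"
        return
    fi
    kubectl get "service/${service}" -o json | jq -r '.status.loadBalancer.ingress[].ip'
}

endpoint=$(get_service_endpoint monitoring-service)

# 1. create a new Admin API key named operator-new
new_key=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
    -d '{"name":"operator-new", "role": "Admin"}' \
    "https://admin:admin@${endpoint}/graph/api/auth/keys" | jq -r .key)

# 2. store it in the cluster secret; the operator reacts by rolling both statefulsets,
#    which is what the generation-4 waits above confirm
kubectl patch secret my-cluster-secrets --type merge \
    --patch "{\"stringData\": {\"pmmserverkey\": \"${new_key}\"}}"

# 3. look up and delete the previous key (named "operator")
old_id=$(curl --insecure -s -X GET "https://admin:admin@${endpoint}/graph/api/auth/keys" \
    | jq '.[] | select(.name == "operator").id')
curl --insecure -s -X DELETE "https://admin:admin@${endpoint}/graph/api/auth/keys/${old_id}"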
+ break + sleep 10 + kubectl wait pod -l app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --for=condition=ready --timeout=600s pod/monitoring-haproxy-0 condition met pod/monitoring-haproxy-1 condition met pod/monitoring-pxc-0 condition met pod/monitoring-pxc-1 condition met pod/monitoring-pxc-2 condition met + desc 'check if pmm-client container enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-pxc + local resource=statefulset/monitoring-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc.yml + local new_result=/tmp/tmp.HXYCfGXVn6/statefulset_monitoring-pxc.yml + desc 'compare statefulset/monitoring-pxc-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-pxc- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.31 >= 1.29' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k129.yml ']' + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k127.yml + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k127-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.31 >= 1.29' + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k127-aks.yml ']' + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. 
| select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-15296", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + kubectl_bin get -o yaml statefulset/monitoring-pxc ++ mktemp + local LAST_OUT=/tmp/tmp.nteZpGC87S ++ mktemp + local LAST_ERR=/tmp/tmp.IS1ZquHFoI + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.nteZpGC87S + cat /tmp/tmp.IS1ZquHFoI + rm /tmp/tmp.nteZpGC87S /tmp/tmp.IS1ZquHFoI + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k127.yml /tmp/tmp.HXYCfGXVn6/statefulset_monitoring-pxc.yml + compare_kubectl statefulset/monitoring-haproxy + local resource=statefulset/monitoring-haproxy + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy.yml + local new_result=/tmp/tmp.HXYCfGXVn6/statefulset_monitoring-haproxy.yml + desc 'compare statefulset/monitoring-haproxy-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-haproxy- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace 
----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k129.yml ']' + version_gt 1.27 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k127-oc.yml ']' + version_gt 1.29 + desc 'return true if kubernetes version equal or greater than desired' + set +o xtrace ----------------------------------------------------------------------------------- return true if kubernetes version equal or greater than desired ----------------------------------------------------------------------------------- ++ bc -l ++ echo '1.31 >= 1.29' + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-haproxy + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. 
| select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-15296", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. 
| select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.RDpraMlEhf ++ mktemp + local LAST_ERR=/tmp/tmp.Q7AsXTOXKq + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.RDpraMlEhf + cat /tmp/tmp.Q7AsXTOXKq + rm /tmp/tmp.RDpraMlEhf /tmp/tmp.Q7AsXTOXKq + return 0 + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k127.yml /tmp/tmp.HXYCfGXVn6/statefulset_monitoring-haproxy.yml + desc 'verify clients agents statuses' + set +o xtrace ----------------------------------------------------------------------------------- verify clients agents statuses ----------------------------------------------------------------------------------- + sleep 300 ++ getSecretData my-cluster-secrets pmmserverkey ++ local secretName=my-cluster-secrets ++ local dataKey=pmmserverkey ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.pmmserverkey}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.kQ06cTM0dU +++ mktemp ++ local LAST_ERR=/tmp/tmp.T6Ku2gbRZj ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.pmmserverkey}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.kQ06cTM0dU ++ cat /tmp/tmp.T6Ku2gbRZj ++ rm /tmp/tmp.kQ06cTM0dU /tmp/tmp.T6Ku2gbRZj ++ return 0 + API_KEY=eyJrIjoiOEN0cTZLTkRWZmgzWTlCdkhyNHRVOW5HWW9Za21sNVoiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o json +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.43UOZ5q8Sm +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.fXfHCFS0Eo ++++ local exit_status=0 ++++ jq '.status.loadBalancer.ingress[].hostname' ++++ sed -e 's/^"//; s/"$//;' +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.43UOZ5q8Sm ++++ cat /tmp/tmp.fXfHCFS0Eo ++++ rm /tmp/tmp.43UOZ5q8Sm /tmp/tmp.fXfHCFS0Eo ++++ return 0 +++ local hostname=null +++ '[' -n null -a null '!=' null ']' ++++ kubectl_bin get service/monitoring-service -o json ++++ sed -e 's/^"//; s/"$//;' ++++ jq '.status.loadBalancer.ingress[].ip' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.jJWNQZwPqf +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.UXSKNyikgL ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o json ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.jJWNQZwPqf ++++ cat /tmp/tmp.UXSKNyikgL ++++ rm /tmp/tmp.jJWNQZwPqf /tmp/tmp.UXSKNyikgL ++++ return 0 +++ local ip=34.29.75.64 +++ '[' -n 34.29.75.64 -a 34.29.75.64 '!=' null ']' +++ echo 34.29.75.64 +++ return ++ get_mgmnt_service_list eyJrIjoiOEN0cTZLTkRWZmgzWTlCdkhyNHRVOW5HWW9Za21sNVoiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 34.29.75.64 monitoring-2-0-15296 ++ local api_key=eyJrIjoiOEN0cTZLTkRWZmgzWTlCdkhyNHRVOW5HWW9Za21sNVoiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 ++ local endpoint=34.29.75.64 ++ local namespace=monitoring-2-0-15296 ++ jq 'walk(if type=="object" then with_entries(select(.key | test("service_id|node_id|agent_id|created_at|updated_at") | not)) else . 
end)' ++ curl -s -k -H 'Authorization: Bearer eyJrIjoiOEN0cTZLTkRWZmgzWTlCdkhyNHRVOW5HWW9Za21sNVoiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9' -X POST https://34.29.75.64/v1/management/Service/List ++ jq 'walk(if type == "array" then sort_by(.agent_type) else . end)' ++ /usr/bin/sed -i s/monitoring-2-0-15296-//g /tmp/tmp.HXYCfGXVn6/active_pmm_agents.json ++ jq '.services | sort_by(.node_name)' ++ cat /tmp/tmp.HXYCfGXVn6/active_pmm_agents.json ++ echo /tmp/tmp.HXYCfGXVn6/active_pmm_agents_sorted.json + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2154/e2e-tests/monitoring-2-0/compare/agents-list.json /tmp/tmp.HXYCfGXVn6/active_pmm_agents_sorted.json + desc 'check mysql metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mysql metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds pxc-prefix-monitoring-2-0-15296-monitoring-pxc-0 admin:admin + local metric=node_boot_time_seconds + local instance=pxc-prefix-monitoring-2-0-15296-monitoring-pxc-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1763574440 ++ /usr/bin/date -u +%s + local end=1763574500 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.qsfYAIxxql ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Vk2lEiZpig +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.qsfYAIxxql +++ cat /tmp/tmp.Vk2lEiZpig +++ rm /tmp/tmp.qsfYAIxxql /tmp/tmp.Vk2lEiZpig +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.PT93EKuI5o ++++ mktemp +++ local LAST_ERR=/tmp/tmp.rycMs21V19 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.PT93EKuI5o +++ cat /tmp/tmp.rycMs21V19 +++ rm /tmp/tmp.PT93EKuI5o /tmp/tmp.rycMs21V19 +++ return 0 ++ local ip=34.29.75.64 ++ '[' -n 34.29.75.64 -a 34.29.75.64 '!=' null ']' ++ echo 34.29.75.64 ++ return + local endpoint=34.29.75.64 ++ jq '.data.result[0]' ++ curl -s -k 'https://admin:admin@34.29.75.64/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-15296-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-15296-monitoring-pxc-0%22%7D%29&start=1763574440&end=1763574500&step=60' + local 'result={ "metric": {}, "values": [ [ 1763574440, "1763570275" ], [ 1763574500, "1763570275" ] ] }' + '[' '{ "metric": {}, "values": [ [ 1763574440, "1763570275" ], [ 1763574500, "1763570275" ] ] }' = null ']' + grep '^"[0-9]' + echo -n '{ "metric": {}, "values": [ [ 1763574440, "1763570275" ], [ 1763574500, "1763570275" ] ] }' + jq '.values[][1]' "1763570275" "1763570275" + get_metric_values mysql_global_status_uptime pxc-prefix-monitoring-2-0-15296-monitoring-pxc-0 admin:admin + local metric=mysql_global_status_uptime + local 
instance=pxc-prefix-monitoring-2-0-15296-monitoring-pxc-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1763574443 ++ /usr/bin/date -u +%s + local end=1763574503 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ jq '.status.loadBalancer.ingress[].hostname' +++ local LAST_OUT=/tmp/tmp.a0wjpDUKQn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Cu5VyR91Mn +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.a0wjpDUKQn +++ cat /tmp/tmp.Cu5VyR91Mn +++ rm /tmp/tmp.a0wjpDUKQn /tmp/tmp.Cu5VyR91Mn +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8UVzDZbCmH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TypybwHQGY +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.8UVzDZbCmH +++ cat /tmp/tmp.TypybwHQGY +++ rm /tmp/tmp.8UVzDZbCmH /tmp/tmp.TypybwHQGY +++ return 0 ++ local ip=34.29.75.64 ++ '[' -n 34.29.75.64 -a 34.29.75.64 '!=' null ']' ++ echo 34.29.75.64 ++ return + local endpoint=34.29.75.64 ++ curl -s -k 'https://admin:admin@34.29.75.64/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-15296-monitoring-pxc-0%22%7d%20or%20mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-15296-monitoring-pxc-0%22%7D%29&start=1763574443&end=1763574503&step=60' ++ jq '.data.result[0]' + local 'result={ "metric": {}, "values": [ [ 1763574443, "161" ], [ 1763574503, "221" ] ] }' + '[' '{ "metric": {}, "values": [ [ 1763574443, "161" ], [ 1763574503, "221" ] ] }' = null ']' + echo -n '{ "metric": {}, "values": [ [ 1763574443, "161" ], [ 1763574503, "221" ] ] }' + jq '.values[][1]' + grep '^"[0-9]' "161" "221" + desc 'check haproxy metrics' + set +o xtrace ----------------------------------------------------------------------------------- check haproxy metrics ----------------------------------------------------------------------------------- + get_metric_values haproxy_backend_status pxc-prefix-monitoring-2-0-15296-monitoring-haproxy-0 admin:admin + local metric=haproxy_backend_status + local instance=pxc-prefix-monitoring-2-0-15296-monitoring-haproxy-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1763574445 ++ /usr/bin/date -u +%s + local end=1763574505 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.BhfpXklucw ++++ mktemp +++ local LAST_ERR=/tmp/tmp.JnkCtbtyME +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.BhfpXklucw +++ cat /tmp/tmp.JnkCtbtyME +++ rm /tmp/tmp.BhfpXklucw /tmp/tmp.JnkCtbtyME +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ jq 
'.status.loadBalancer.ingress[].ip' +++ kubectl_bin get service/monitoring-service -o json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.1rOGsc8YyE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Hqm7Bx2Z1o +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.1rOGsc8YyE +++ cat /tmp/tmp.Hqm7Bx2Z1o +++ rm /tmp/tmp.1rOGsc8YyE /tmp/tmp.Hqm7Bx2Z1o +++ return 0 ++ local ip=34.29.75.64 ++ '[' -n 34.29.75.64 -a 34.29.75.64 '!=' null ']' ++ echo 34.29.75.64 ++ return + local endpoint=34.29.75.64 ++ curl -s -k 'https://admin:admin@34.29.75.64/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28haproxy_backend_status%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-15296-monitoring-haproxy-0%22%7d%20or%20haproxy_backend_status%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-15296-monitoring-haproxy-0%22%7D%29&start=1763574445&end=1763574505&step=60' ++ jq '.data.result[0]' + local 'result={ "metric": {}, "values": [ [ 1763574445, "0" ], [ 1763574505, "0" ] ] }' + '[' '{ "metric": {}, "values": [ [ 1763574445, "0" ], [ 1763574505, "0" ] ] }' = null ']' + jq '.values[][1]' + grep '^"[0-9]' + echo -n '{ "metric": {}, "values": [ [ 1763574445, "0" ], [ 1763574505, "0" ] ] }' "0" "0" + get_metric_values haproxy_backend_active_servers pxc-prefix-monitoring-2-0-15296-monitoring-haproxy-0 admin:admin + local metric=haproxy_backend_active_servers + local instance=pxc-prefix-monitoring-2-0-15296-monitoring-haproxy-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1763574447 ++ /usr/bin/date -u +%s + local end=1763574507 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Pog6WoeSqN ++++ mktemp +++ local LAST_ERR=/tmp/tmp.oVzNhbTdlw +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.Pog6WoeSqN +++ cat /tmp/tmp.oVzNhbTdlw +++ rm /tmp/tmp.Pog6WoeSqN /tmp/tmp.oVzNhbTdlw +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' +++ jq '.status.loadBalancer.ingress[].ip' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ezB5orLy7j ++++ mktemp +++ local LAST_ERR=/tmp/tmp.EnRBosL2cD +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.ezB5orLy7j +++ cat /tmp/tmp.EnRBosL2cD +++ rm /tmp/tmp.ezB5orLy7j /tmp/tmp.EnRBosL2cD +++ return 0 ++ local ip=34.29.75.64 ++ '[' -n 34.29.75.64 -a 34.29.75.64 '!=' null ']' ++ echo 34.29.75.64 ++ return + local endpoint=34.29.75.64 ++ curl -s -k 'https://admin:admin@34.29.75.64/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28haproxy_backend_active_servers%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-15296-monitoring-haproxy-0%22%7d%20or%20haproxy_backend_active_servers%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-15296-monitoring-haproxy-0%22%7D%29&start=1763574447&end=1763574507&step=60' ++ jq '.data.result[0]' + local 'result={ "metric": {}, "values": [ [ 1763574447, "1" ], [ 1763574507, "1" 
] ] }' + '[' '{ "metric": {}, "values": [ [ 1763574447, "1" ], [ 1763574507, "1" ] ] }' = null ']' + echo -n '{ "metric": {}, "values": [ [ 1763574447, "1" ], [ 1763574507, "1" ] ] }' + grep '^"[0-9]' + jq '.values[][1]' "1" "1" + desc 'check QAN data' + set +o xtrace ----------------------------------------------------------------------------------- check QAN data ----------------------------------------------------------------------------------- + get_qan20_values monitoring-pxc-0 admin:admin + local instance=monitoring-pxc-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S -d '-30 minute' + local start=2025-11-19T17:18:29 ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S + local end=2025-11-19T17:48:29 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].hostname' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.guCSEoKhpG ++++ mktemp +++ local LAST_ERR=/tmp/tmp.ElK4udo2Y9 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ sed -e 's/^"//; s/"$//;' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.guCSEoKhpG +++ cat /tmp/tmp.ElK4udo2Y9 +++ rm /tmp/tmp.guCSEoKhpG /tmp/tmp.ElK4udo2Y9 +++ return 0 ++ local hostname=null ++ '[' -n null -a null '!=' null ']' +++ kubectl_bin get service/monitoring-service -o json +++ jq '.status.loadBalancer.ingress[].ip' +++ sed -e 's/^"//; s/"$//;' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.K5mgtwk5yc ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9a8pRx0v4K +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.K5mgtwk5yc +++ cat /tmp/tmp.9a8pRx0v4K +++ rm /tmp/tmp.K5mgtwk5yc /tmp/tmp.9a8pRx0v4K +++ return 0 ++ local ip=34.29.75.64 ++ '[' -n 34.29.75.64 -a 34.29.75.64 '!=' null ']' ++ echo 34.29.75.64 ++ return + local endpoint=34.29.75.64 + cat ++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z + jq '.rows[].fingerprint' + curl -s -k -XPOST -d @payload.json https://admin:admin@34.29.75.64/v0/qan/GetReport null + rm -f payload.json + nodeList=($(get_node_id_from_pmm)) ++ get_node_id_from_pmm ++ nodeList=() ++ local -a nodeList +++ kubectl_bin get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns=NAME:.metadata.name ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jRa2BBNl5K ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Xv4UTgbCeU +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns=NAME:.metadata.name +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.jRa2BBNl5K +++ cat /tmp/tmp.Xv4UTgbCeU +++ rm /tmp/tmp.jRa2BBNl5K /tmp/tmp.Xv4UTgbCeU +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ jq -r .pmm_agent_status.node_id +++ kubectl_bin exec -n monitoring-2-0-15296 monitoring-pxc-0 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5ZWz4IzK1i ++++ mktemp +++ local LAST_ERR=/tmp/tmp.pFrABH739y +++ local exit_status=0 ++++ seq 0 2 +++ for 
i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-15296 monitoring-pxc-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.5ZWz4IzK1i +++ cat /tmp/tmp.pFrABH739y +++ rm /tmp/tmp.5ZWz4IzK1i /tmp/tmp.pFrABH739y +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-15296 monitoring-pxc-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ktSa9GciBo ++++ mktemp +++ local LAST_ERR=/tmp/tmp.dc4mqMWiFT +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-15296 monitoring-pxc-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.ktSa9GciBo +++ cat /tmp/tmp.dc4mqMWiFT +++ rm /tmp/tmp.ktSa9GciBo /tmp/tmp.dc4mqMWiFT +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-15296 monitoring-pxc-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.uyAml0sY8b ++++ mktemp +++ local LAST_ERR=/tmp/tmp.diun4r4ISO +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-15296 monitoring-pxc-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.uyAml0sY8b +++ cat /tmp/tmp.diun4r4ISO +++ rm /tmp/tmp.uyAml0sY8b /tmp/tmp.diun4r4ISO +++ return 0 ++ echo /node_id/69e5f234-eceb-4e41-8d74-8db171de472c /node_id/2b596e92-c29e-4d1e-b01e-9261f5fb6c79 /node_id/dd73be8d-7357-40c2-987b-519854523706 + nodeList_from_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/69e5f234-eceb-4e41-8d74-8db171de472c /node_id/2b596e92-c29e-4d1e-b01e-9261f5fb6c79 /node_id/dd73be8d-7357-40c2-987b-519854523706 ++ nodeList=("$@") ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' +++ grep /node_id/69e5f234-eceb-4e41-8d74-8db171de472c ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.m7ATdpIhCE ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.G2lPvCRIdl +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.m7ATdpIhCE +++++ cat /tmp/tmp.G2lPvCRIdl +++++ rm 
/tmp/tmp.m7ATdpIhCE /tmp/tmp.G2lPvCRIdl +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.sBrMbaL1lG +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.zRAKnyWOXs ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.sBrMbaL1lG ++++ cat /tmp/tmp.zRAKnyWOXs ++++ rm /tmp/tmp.sBrMbaL1lG /tmp/tmp.zRAKnyWOXs ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.SnHHMRm0ug +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.9fl85PgJ2S ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.SnHHMRm0ug ++++ cat /tmp/tmp.9fl85PgJ2S ++++ rm /tmp/tmp.SnHHMRm0ug /tmp/tmp.9fl85PgJ2S ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-15296 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.29.75.64/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.x514GZmkY2 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yQjxgLTQAX +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-15296 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.29.75.64/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.x514GZmkY2 +++ cat /tmp/tmp.yQjxgLTQAX +++ rm /tmp/tmp.x514GZmkY2 /tmp/tmp.yQjxgLTQAX +++ return 0 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++ awk '{print $4}' +++ grep /node_id/2b596e92-c29e-4d1e-b01e-9261f5fb6c79 +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.QJycVKA5Kh ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.nR5Q0qVCkf +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.QJycVKA5Kh +++++ cat /tmp/tmp.nR5Q0qVCkf +++++ rm /tmp/tmp.QJycVKA5Kh /tmp/tmp.nR5Q0qVCkf +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.CdDgOpD8fq +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.KpCLZC1hRG ++++ local exit_status=0 +++++ 
seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.CdDgOpD8fq ++++ cat /tmp/tmp.KpCLZC1hRG ++++ rm /tmp/tmp.CdDgOpD8fq /tmp/tmp.KpCLZC1hRG ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.nqCjDED8nk +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.5S2urQii5s ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.nqCjDED8nk ++++ cat /tmp/tmp.5S2urQii5s ++++ rm /tmp/tmp.nqCjDED8nk /tmp/tmp.5S2urQii5s ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-15296 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.29.75.64/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.vIQa4EeJL5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.YqiMw4xWOd +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-15296 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.29.75.64/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.vIQa4EeJL5 +++ cat /tmp/tmp.YqiMw4xWOd +++ rm /tmp/tmp.vIQa4EeJL5 /tmp/tmp.YqiMw4xWOd +++ return 0 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++ grep /node_id/dd73be8d-7357-40c2-987b-519854523706 +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.AukkYcTd3A ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.UDUlkrcP8Y +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.AukkYcTd3A +++++ cat /tmp/tmp.UDUlkrcP8Y +++++ rm /tmp/tmp.AukkYcTd3A /tmp/tmp.UDUlkrcP8Y +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.lbo5eTYQyq +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.v2jeigzht4 ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.lbo5eTYQyq ++++ cat /tmp/tmp.v2jeigzht4 ++++ rm /tmp/tmp.lbo5eTYQyq /tmp/tmp.v2jeigzht4 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local 
LAST_OUT=/tmp/tmp.e738dvlazq +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.0h6c2XlunE ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.e738dvlazq ++++ cat /tmp/tmp.0h6c2XlunE ++++ rm /tmp/tmp.e738dvlazq /tmp/tmp.0h6c2XlunE ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-15296 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.29.75.64/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.SbBBnCJBHW ++++ mktemp +++ local LAST_ERR=/tmp/tmp.iVOZhZIWxS +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-15296 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.29.75.64/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.SbBBnCJBHW +++ cat /tmp/tmp.iVOZhZIWxS +++ rm /tmp/tmp.SbBBnCJBHW /tmp/tmp.iVOZhZIWxS +++ return 0 ++ echo /node_id/69e5f234-eceb-4e41-8d74-8db171de472c /node_id/2b596e92-c29e-4d1e-b01e-9261f5fb6c79 /node_id/dd73be8d-7357-40c2-987b-519854523706 + for node_id in '"${nodeList_from_pmm[@]}"' + '[' -z /node_id/69e5f234-eceb-4e41-8d74-8db171de472c ']' + for node_id in '"${nodeList_from_pmm[@]}"' + '[' -z /node_id/2b596e92-c29e-4d1e-b01e-9261f5fb6c79 ']' + for node_id in '"${nodeList_from_pmm[@]}"' + '[' -z /node_id/dd73be8d-7357-40c2-987b-519854523706 ']' + kubectl_bin patch pxc monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.QRZTJaLAvR ++ mktemp + local LAST_ERR=/tmp/tmp.rkS4xgTmxY + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.QRZTJaLAvR perconaxtradbcluster.pxc.percona.com/monitoring patched + cat /tmp/tmp.rkS4xgTmxY + rm /tmp/tmp.QRZTJaLAvR /tmp/tmp.rkS4xgTmxY + return 0 + wait_for_delete pod/monitoring-pxc-0 + local res=pod/monitoring-pxc-0 + echo -n 'waiting for pod/monitoring-pxc-0 to be deleted' waiting for pod/monitoring-pxc-0 to be deleted+ set +o xtrace .................Error from server (NotFound): pods "monitoring-pxc-0" not found + does_node_id_exists_in_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/69e5f234-eceb-4e41-8d74-8db171de472c /node_id/2b596e92-c29e-4d1e-b01e-9261f5fb6c79 /node_id/dd73be8d-7357-40c2-987b-519854523706 ++ nodeList=("$@") ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/69e5f234-eceb-4e41-8d74-8db171de472c +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.dqYKK5LhES ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.BZoBIIa8VV +++++ local 
exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.dqYKK5LhES +++++ cat /tmp/tmp.BZoBIIa8VV +++++ rm /tmp/tmp.dqYKK5LhES /tmp/tmp.BZoBIIa8VV +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Y30Zml9XdV +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.M4ZfeL4kl0 ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.Y30Zml9XdV ++++ cat /tmp/tmp.M4ZfeL4kl0 ++++ rm /tmp/tmp.Y30Zml9XdV /tmp/tmp.M4ZfeL4kl0 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.q6lE5zCGe3 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.eNEoIeM81d ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.q6lE5zCGe3 ++++ cat /tmp/tmp.eNEoIeM81d ++++ rm /tmp/tmp.q6lE5zCGe3 /tmp/tmp.eNEoIeM81d ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-15296 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.29.75.64/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9fILpN3ESF ++++ mktemp +++ local LAST_ERR=/tmp/tmp.HZdWzWwQgm +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-15296 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.29.75.64/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.9fILpN3ESF +++ cat /tmp/tmp.HZdWzWwQgm +++ rm /tmp/tmp.9fILpN3ESF /tmp/tmp.HZdWzWwQgm +++ return 0 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/2b596e92-c29e-4d1e-b01e-9261f5fb6c79 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.8ua7DOqRhI ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.NBDo6ciV5u +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.8ua7DOqRhI +++++ cat /tmp/tmp.NBDo6ciV5u +++++ rm /tmp/tmp.8ua7DOqRhI /tmp/tmp.NBDo6ciV5u +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ kubectl_bin get service/monitoring-service -o 
'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.y8wjjmYF1Q +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.XVPsC4AvRS ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.y8wjjmYF1Q ++++ cat /tmp/tmp.XVPsC4AvRS ++++ rm /tmp/tmp.y8wjjmYF1Q /tmp/tmp.XVPsC4AvRS ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.RdKqVk91mn +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.LL6NcPdyMI ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.RdKqVk91mn ++++ cat /tmp/tmp.LL6NcPdyMI ++++ rm /tmp/tmp.RdKqVk91mn /tmp/tmp.LL6NcPdyMI ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-15296 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.29.75.64/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2HBJ76FkRV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KHyehZ8QP0 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-15296 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.29.75.64/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.2HBJ76FkRV +++ cat /tmp/tmp.KHyehZ8QP0 +++ rm /tmp/tmp.2HBJ76FkRV /tmp/tmp.KHyehZ8QP0 +++ return 0 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/dd73be8d-7357-40c2-987b-519854523706 ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++ awk '{print $4}' +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.56LAEzeObt ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.OGoM72CVtc +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.56LAEzeObt +++++ cat /tmp/tmp.OGoM72CVtc +++++ rm /tmp/tmp.56LAEzeObt /tmp/tmp.OGoM72CVtc +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.6CWgYc8I1e +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.opJqe5alDm ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ 
exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.6CWgYc8I1e ++++ cat /tmp/tmp.opJqe5alDm ++++ rm /tmp/tmp.6CWgYc8I1e /tmp/tmp.opJqe5alDm ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.MnHmDKMOYD +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.bsQixXGQFo ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.MnHmDKMOYD ++++ cat /tmp/tmp.bsQixXGQFo ++++ rm /tmp/tmp.MnHmDKMOYD /tmp/tmp.bsQixXGQFo ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-15296 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.29.75.64/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.mBjX1gL2W6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yhL8a6pugT +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-15296 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.29.75.64/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.mBjX1gL2W6 +++ cat /tmp/tmp.yhL8a6pugT +++ rm /tmp/tmp.mBjX1gL2W6 /tmp/tmp.yhL8a6pugT +++ return 0 ++ echo + [[ -n '' ]] + helm uninstall monitoring release "monitoring" uninstalled + destroy monitoring-2-0-15296 + local namespace=monitoring-2-0-15296 + local ignore_logs=true + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false -o 1 == 1 ']' + grep -v level=info + grep -v 'the object has been modified' ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ + sort -u + tee /tmp/tmp.HXYCfGXVn6/operator.log + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + grep -v 'get backup status: Job.batch' +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.uaoOLnKN9A +++ mktemp ++ local LAST_ERR=/tmp/tmp.NDbXg6MV9D ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.uaoOLnKN9A ++ cat /tmp/tmp.NDbXg6MV9D ++ rm /tmp/tmp.uaoOLnKN9A /tmp/tmp.NDbXg6MV9D ++ return 0 + kubectl_bin logs -n pxc-operator percona-xtradb-cluster-operator-6cf85965f9-ldkj4 ++ mktemp + local LAST_OUT=/tmp/tmp.RDDKO4n1xm ++ mktemp + local LAST_ERR=/tmp/tmp.kOjNUJzixk + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs -n pxc-operator percona-xtradb-cluster-operator-6cf85965f9-ldkj4 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.RDDKO4n1xm + cat /tmp/tmp.kOjNUJzixk + rm /tmp/tmp.RDDKO4n1xm /tmp/tmp.kOjNUJzixk + return 0 } }, }, { }, 
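
The stray "}," braces around this point and the alphabetized "... // N identical elements/fields" runs below are not corruption in the objects themselves: they are multi-line cmp.Diff blocks from the operator's DEBUG output, reordered because the log-collection step pipes the pod log through sort -u. A minimal sketch of that collection pattern, with illustrative file names (the real helper also strips limits-* paths from the messages):

OPERATOR_POD=$(kubectl get pods -n pxc-operator \
  --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator \
  -o 'jsonpath={.items[].metadata.name}')
kubectl logs -n pxc-operator "$OPERATOR_POD" \
  | grep -v level=info \
  | grep -v 'get backup status: Job.batch' \
  | grep -v 'the object has been modified' \
  | /usr/bin/sed -r 's/"ts":[0-9.]+//' \
  | sort -u \
  | tee /tmp/operator.log

# sort -u deduplicates noisy reconcile output, but it also interleaves the
# lines of multi-line diffs, which is why they read as sorted fragments here.
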
}, { }, }, { }, }, }, - }, - { - }, + }, ... // 11 identical elements ... // 12 identical elements ... // 16 identical fields ... // 16 identical fields 2025-11-19T17:29:09.782Z INFO setup Runs on {"platform": "kubernetes", "version": "v1.31.13-gke.1377000"} 2025-11-19T17:29:09.783Z INFO setup Manager starting up {"gitCommit": "7a623b10a97567887377e516f24d3500d7412fc7", "gitBranch": "PR-2154-7a623b10", "buildTime": "2025-11-19T16:30:53Z", "goVersion": "go1.25.4", "os": "linux", "arch": "amd64"} 2025-11-19T17:29:09.786Z INFO setup Registering Components. 2025-11-19T17:29:10.175Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":8080", "secure": false} 2025-11-19T17:29:10.175Z INFO controller-runtime.metrics Starting metrics server 2025-11-19T17:29:10.175Z INFO controller-runtime.webhook Registering webhook {"path": "/validate-percona-xtradbcluster"} 2025-11-19T17:29:10.175Z INFO controller-runtime.webhook Starting webhook server 2025-11-19T17:29:10.175Z INFO setup Starting the Cmd. 2025-11-19T17:29:10.175Z INFO starting server {"name": "health probe", "addr": "[::]:8081"} 2025-11-19T17:29:10.176Z INFO controller-runtime.certwatcher Starting certificate poll+watcher {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key", "interval": "10s"} 2025-11-19T17:29:10.176Z INFO controller-runtime.certwatcher Updated current TLS certificate {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key"} 2025-11-19T17:29:10.176Z INFO controller-runtime.webhook Serving webhook server {"host": "", "port": 9443} 2025-11-19T17:29:10.277Z INFO attempting to acquire leader lease pxc-operator/08db1feb.percona.com... 2025-11-19T17:29:10.307Z DEBUG events percona-xtradb-cluster-operator-6cf85965f9-ldkj4_3e381bab-64b9-4030-bf45-e72aed7b27ab became leader {"type": "Normal", "object": {"kind":"Lease","namespace":"pxc-operator","name":"08db1feb.percona.com","uid":"0fe3bd16-3ef9-4361-8bcd-6497f8412640","apiVersion":"coordination.k8s.io/v1","resourceVersion":"1763573350300591009"}, "reason": "LeaderElection"} 2025-11-19T17:29:10.307Z INFO Starting EventSource {"controller": "pxc-controller", "source": "kind source: *v1.Secret"} 2025-11-19T17:29:10.307Z INFO successfully acquired lease pxc-operator/08db1feb.percona.com 2025-11-19T17:29:10.308Z INFO Starting EventSource {"controller": "pxcbackup-controller", "source": "kind source: *v1.PerconaXtraDBClusterBackup"} 2025-11-19T17:29:10.308Z INFO Starting EventSource {"controller": "pxc-controller", "source": "kind source: *v1.PerconaXtraDBCluster"} 2025-11-19T17:29:10.308Z INFO Starting EventSource {"controller": "pxcrestore-controller", "source": "kind source: *v1.PerconaXtraDBClusterRestore"} 2025-11-19T17:29:10.408Z INFO Starting Controller {"controller": "pxc-controller"} 2025-11-19T17:29:10.408Z INFO Starting workers {"controller": "pxc-controller", "worker count": 1} 2025-11-19T17:29:10.509Z INFO Starting Controller {"controller": "pxcbackup-controller"} 2025-11-19T17:29:10.509Z INFO Starting Controller {"controller": "pxcrestore-controller"} 2025-11-19T17:29:10.509Z INFO Starting workers {"controller": "pxcbackup-controller", "worker count": 1} 2025-11-19T17:29:10.509Z INFO Starting workers {"controller": "pxcrestore-controller", "worker count": 1} 2025-11-19T17:30:21.243Z INFO Set CR version {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": 
"4fabd35b-6025-4bbe-920c-8947ed3207fa", "version": "1.19.0"} 2025-11-19T17:30:23.079Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "4fabd35b-6025-4bbe-920c-8947ed3207fa", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"} 2025-11-19T17:30:23.130Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "4fabd35b-6025-4bbe-920c-8947ed3207fa", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"} 2025-11-19T17:30:23.241Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "4fabd35b-6025-4bbe-920c-8947ed3207fa", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2025-11-19T17:30:23.355Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "4fabd35b-6025-4bbe-920c-8947ed3207fa", "object": "monitoring-pxc-unready", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2025-11-19T17:30:23.454Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "4fabd35b-6025-4bbe-920c-8947ed3207fa", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2025-11-19T17:30:23.704Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "4fabd35b-6025-4bbe-920c-8947ed3207fa", "object": "monitoring-haproxy-replicas", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2025-11-19T17:30:24.674Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "a3f94f32-0ee8-4259-8473-b45dfea4412d", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2025-11-19T17:30:24.699Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "a3f94f32-0ee8-4259-8473-b45dfea4412d", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2025-11-19T17:31:35.561Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "437bab6b-bf55-4572-9dcf-553fdc0c1006", "user": "operator"} 2025-11-19T17:31:35.596Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "437bab6b-bf55-4572-9dcf-553fdc0c1006", "user": "monitor"} 2025-11-19T17:31:35.643Z INFO User monitor: granted privileges {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "437bab6b-bf55-4572-9dcf-553fdc0c1006"} 2025-11-19T17:31:35.681Z INFO monitor user privileges granted {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "437bab6b-bf55-4572-9dcf-553fdc0c1006"} 2025-11-19T17:31:35.711Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "437bab6b-bf55-4572-9dcf-553fdc0c1006", "user": "xtrabackup"} 2025-11-19T17:31:35.750Z INFO User xtrabackup: granted privileges {"controller": "pxc-controller", "namespace": 
"monitoring-2-0-15296", "name": "monitoring", "reconcileID": "437bab6b-bf55-4572-9dcf-553fdc0c1006"} 2025-11-19T17:31:35.778Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "437bab6b-bf55-4572-9dcf-553fdc0c1006", "user": "replication"} 2025-11-19T17:34:01.508Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "46f3e2a1-de3d-462f-98ac-4a6ed7b91fd9", "user": "root"} 2025-11-19T17:34:01.641Z INFO update PXC version (fetched from db) {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "46f3e2a1-de3d-462f-98ac-4a6ed7b91fd9", "new version": "8.0.43-34.1"} 2025-11-19T17:37:02.577Z INFO Internal secrets updated {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "7fec1087-81d4-4a72-b417-3f18d3625629", "user": "pmmserverkey"} 2025-11-19T17:37:08.201Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "e990fed3-e071-4ae8-96da-a0cb304c261d", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-11-19T17:37:08.601Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "e990fed3-e071-4ae8-96da-a0cb304c261d", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-11-19T17:37:08.747Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "e990fed3-e071-4ae8-96da-a0cb304c261d", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-11-19T17:38:01.542Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "cafa9b5e-f9b6-4ed5-9ea6-5c2619af8c88", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-1: dial tcp: lookup monitoring-pxc-1.monitoring-pxc.monitoring-2-0-15296 on 34.118.224.10:53: no such host"} 2025-11-19T17:38:26.742Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "2ee2685c-bfb5-425a-9fed-736937292d48", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-1: dial tcp 10.163.48.30:33062: i/o timeout"} 2025-11-19T17:39:55.411Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "4bec4317-478e-4b4b-8885-77da0e20ec1c", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-11-19T17:39:55.583Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "4bec4317-478e-4b4b-8885-77da0e20ec1c", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-11-19T17:39:55.803Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": 
"4bec4317-478e-4b4b-8885-77da0e20ec1c", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-11-19T17:40:54.756Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "f859513d-8bd6-4e04-a3b5-3c61ffa8ca19", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-1: dial tcp: lookup monitoring-pxc-1.monitoring-pxc.monitoring-2-0-15296 on 34.118.224.10:53: no such host"} 2025-11-19T17:41:46.292Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "a733eb8d-ae8d-48a6-be7a-c7e643ce5d25", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-0: dial tcp 10.163.49.23:33062: connect: connection refused"} 2025-11-19T17:42:23.691Z INFO Password changed, updating user {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "ff3460e3-539d-495f-b7a3-6e7337cc6b89", "user": "pmmserverkey"} 2025-11-19T17:42:23.720Z INFO HAProxy pods will be restarted {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "ff3460e3-539d-495f-b7a3-6e7337cc6b89", "last-applied-secret": "4247eaaee55a475b7ef0e15cc8802eb640dda70f4a7451716291b33c049d614e"} 2025-11-19T17:42:23.720Z INFO Internal secrets updated {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "ff3460e3-539d-495f-b7a3-6e7337cc6b89", "user": "pmmserverkey"} 2025-11-19T17:42:23.720Z INFO PXC pods will be restarted {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "ff3460e3-539d-495f-b7a3-6e7337cc6b89", "last-applied-secret": "4247eaaee55a475b7ef0e15cc8802eb640dda70f4a7451716291b33c049d614e"} 2025-11-19T17:42:23.723Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "ff3460e3-539d-495f-b7a3-6e7337cc6b89", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-11-19T17:42:23.792Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "ff3460e3-539d-495f-b7a3-6e7337cc6b89", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-11-19T17:42:24.963Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "8b00f4fd-d041-48e9-9f08-d57d7d28e30d", "err": "failed to connect to pod monitoring-pxc-2: dial tcp: lookup monitoring-pxc-2.monitoring-pxc.monitoring-2-0-15296 on 34.118.224.10:53: no such host"} 2025-11-19T17:43:25.435Z INFO reconcile replication error {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "c3ab6df5-54e8-4382-a2d6-dbbe773e628f", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-1: dial tcp 10.163.48.32:33062: connect: connection refused"} 2025-11-19T17:49:02.190Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "d6aca63b-6a36-41cb-b4f0-0a1facff4413", "object": "monitoring-pxc", 
"kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-11-19T17:49:02.254Z DEBUG Updating object {"controller": "pxc-controller", "namespace": "monitoring-2-0-15296", "name": "monitoring", "reconcileID": "d6aca63b-6a36-41cb-b4f0-0a1facff4413", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} ... // 22 identical elements ... // 22 identical fields ... // 23 identical elements ... // 25 identical elements ... // 26 identical elements ... // 2 identical entries ... // 2 identical fields ... // 2 identical fields ... // 2 identical fields ... // 2 identical fields ... // 3 identical fields ... // 3 identical fields ... // 3 identical fields ... // 4 identical elements ... // 4 identical fields ... // 4 identical fields ... // 5 identical fields ... // 5 identical fields ... // 6 identical fields ... // 7 identical fields ... // 8 identical fields ... // 9 identical elements ... // 9 identical fields ... // 9 identical fields AccessModes: nil, ActiveDeadlineSeconds: nil, Affinity: nil, Affinity: nil, Annotations: map[string]string{ - Annotations: map[string]string{ + Annotations: map[string]string{ + APIVersion: "", - APIVersion: "apps/v1", - APIVersion: "apps/v1", - APIVersion: "v1", Args: {"haproxy"}, Args: {"mysqld"}, Args: nil, AutomountServiceAccountToken: nil, + AvailableReplicas: 0, - AvailableReplicas: 2, - AvailableReplicas: 3, AWSElasticBlockStore: nil, AzureFile: nil, Capacity: nil, - CollisionCount: &0, + CollisionCount: nil, Conditions: nil, ConfigMapKeyRef: nil, ConfigMap: &v1.ConfigMapVolumeSource{ ContainerPort: 30100, ContainerPort: 30101, ContainerPort: 30102, ContainerPort: 30103, ContainerPort: 30104, ContainerPort: 30105, ContainerPort: 3306, ContainerPort: 33060, ContainerPort: 33062, ContainerPort: 3307, ContainerPort: 3309, ContainerPort: 4444, ContainerPort: 4567, ContainerPort: 4568, ContainerPort: 7777, ContainerPort: 8404, Containers: []v1.Container{ + CreationTimestamp: v1.Time{}, - CreationTimestamp: v1.Time{Time: s"2025-11-19 17:30:23 +0000 UTC"}, + CurrentReplicas: 0, - CurrentReplicas: 2, - CurrentReplicas: 3, + CurrentRevision: "", - CurrentRevision: "monitoring-haproxy-64547b7577", - CurrentRevision: "monitoring-haproxy-67699f8fc9", - CurrentRevision: "monitoring-haproxy-6dbcd6fc7", - CurrentRevision: "monitoring-haproxy-f97cd5fc5", - CurrentRevision: "monitoring-pxc-5c4dcb8979", - CurrentRevision: "monitoring-pxc-64bb47876f", - CurrentRevision: "monitoring-pxc-65568f9d95", - CurrentRevision: "monitoring-pxc-c5d89d845", DataSource: nil, DataSourceRef: nil, - DefaultMode: &420, - DefaultMode: &420, + DefaultMode: nil, + DefaultMode: nil, DeletionGracePeriodSeconds: nil, DeletionGracePeriodSeconds: nil, DeletionTimestamp: nil, + DeprecatedServiceAccount: "", - DeprecatedServiceAccount: "default", + DNSPolicy: "", - DNSPolicy: "ClusterFirst", EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}}, Env: {{Name: "PXC_SERVICE", Value: "monitoring-pxc"}, {Name: "LIVENESS_CHECK_TIMEOUT", Value: "5"}, {Name: "READINESS_CHECK_TIMEOUT", Value: "1"}}, Env: {{Name: "PXC_SERVICE", Value: "monitoring-pxc-unready"}, {Name: "MONITOR_HOST", Value: "%"}, {Name: "MYSQL_ROOT_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-monitoring"}, Key: "root"}}}, {Name: "XTRABACKUP_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-monitoring"}, Key: 
"xtrabackup"}}}, ...}, Env: []v1.EnvVar{ EphemeralContainers: nil, Exec: nil, FailureThreshold: 3, FC: nil, FieldPath: "metadata.name", FieldPath: "metadata.namespace", FieldRef: nil, FieldRef: &v1.ObjectFieldSelector{ - FieldsType: "FieldsV1", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:metadata":{"f:annotations":{".":{},"f:percona.com/last-confi`..., - FieldsV1: s`{"f:status":{"f:availableReplicas":{},"f:collisionCount":{},"f:c`..., FileKeyRef: nil, Finalizers: nil, Finalizers: nil, + Generation: 0, - Generation: 1, - Generation: 2, - Generation: 3, - Generation: 4, GitRepo: nil, GRPC: nil, Host: "", HostAliases: nil, HostAliases: nil, HostIP: "", HostIPC: false, Hostname: "", HostPort: 0, HTTPGet: &v1.HTTPGetAction{ HTTPHeaders: nil, ImagePullPolicy: "Always", ImagePullSecrets: nil, InitContainers: []v1.Container{ InitialDelaySeconds: 15, InitialDelaySeconds: 300, InitialDelaySeconds: 300, ISCSI: nil, Items: nil, Items: nil, - Key: "pmmserver", + Key: "pmmserverkey", "kubectl.kubernetes.io/default-container": "haproxy", "kubectl.kubernetes.io/default-container": "pxc", Labels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Labels: nil, + "last-applied-secret": "4247eaaee55a475b7ef0e15cc8802eb640dda70f4a7451716291b33c049d614e", Lifecycle: nil, Lifecycle: &{PreStop: &{Exec: &{Command: {"bash", "-c", "pmm-admin unregister --force"}}}}, LivenessProbe: &v1.Probe{ LocalObjectReference: {Name: "auto-monitoring-pxc"}, LocalObjectReference: {Name: "internal-monitoring"}, LocalObjectReference: {Name: "monitoring-haproxy"}, LocalObjectReference: {Name: "monitoring-pxc"}, ManagedFields: nil, + ManagedFields: nil, - ManagedFields: []v1.ManagedFieldsEntry{ - Manager: "kube-controller-manager", - Manager: "percona-xtradb-cluster-operator", MinReadySeconds: 0, Name: "", Name: "auto-config", {Name: "bin", VolumeSource: {EmptyDir: &{}}}, {Name: "CLIENT_PORT_LISTEN", Value: "7777"}, {Name: "CLIENT_PORT_MAX", Value: "30105"}, {Name: "CLIENT_PORT_MIN", Value: "30100"}, Name: "config", {Name: "DB_TYPE", Value: "haproxy"}, {Name: "DB_TYPE", Value: "mysql"}, {Name: "DB_USER", Value: "monitor"}, {Name: "haproxy-auto", VolumeSource: {EmptyDir: &{}}}, Name: "haproxy-custom", Name: "ist", {Name: "MONITOR_USER", Value: "monitor"}, Name: "my-env-var-secrets", Name: "mysql", Name: "mysql-admin", Name: "mysql-init-file", Name: "mysql-replicas", Name: "mysql-users-secret-file", Name: "mysqlx", {Name: "PMM_AGENT_LISTEN_PORT", Value: "7777"}, {Name: "PMM_AGENT_PORTS_MIN", Value: "30100"}, {Name: "PMM_AGENT_SERVER_ADDRESS", Value: "monitoring-service"}, Name: "PMM_AGENT_SERVER_PASSWORD", Name: "PMM_AGENT_SERVER_USERNAME", {Name: "PMM_AGENT_SERVER_USERNAME", Value: "api_key"}, {Name: "PMM_AGENT_SETUP_FORCE", Value: "1"}, Name: "PMM_AGENT_SETUP_NODE_NAME", {Name: "PMM_AGENT_SETUP_NODE_TYPE", Value: "container"}, Name: "PMM_PASSWORD", {Name: "PMM_SERVER", Value: "monitoring-service"}, Name: "PMM_USER", Name: "POD_NAME", Name: "POD_NAMESPASE", Name: "proxy-protocol", Namespace: "monitoring-2-0-15296", Name: "ssl", Name: "ssl-internal", Name: "sst", Name: "stats", {Name: "tmp", VolumeSource: {EmptyDir: &{}}}, Name: "vault-keyring-secret", Name: 
"write-set", NFS: nil, NodeName: "", NodeSelector: nil, ObjectMeta: {Labels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: {"kubectl.kubernetes.io/default-container": "haproxy", "last-applied-secret": "4247eaaee55a475b7ef0e15cc8802eb640dda70f4a7451716291b33c049d614e", "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e", "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0"}}, ObjectMeta: {Labels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: {"kubectl.kubernetes.io/default-container": "haproxy", "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e"}}, ObjectMeta: {Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: {"kubectl.kubernetes.io/default-container": "pxc", "last-applied-secret": "4247eaaee55a475b7ef0e15cc8802eb640dda70f4a7451716291b33c049d614e", "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e", "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0", ...}}, ObjectMeta: {Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: {"kubectl.kubernetes.io/default-container": "pxc", "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e", "percona.com/ssl-hash": "a75443314a73a61987c4abcf725e58db", "percona.com/ssl-internal-hash": "5b70e154306c2aec648e246633319556"}}, ObjectMeta: {Name: "datadir", Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, ObjectMeta: v1.ObjectMeta{ ObjectMeta: v1.ObjectMeta{ + ObservedGeneration: 0, - ObservedGeneration: 1, - ObservedGeneration: 2, - ObservedGeneration: 3, - ObservedGeneration: 4, - Operation: "Update", - Operation: "Update", Optional: &false, Optional: nil, Optional: &true, Optional: &true, Ordinals: nil, OS: nil, Overhead: nil, OwnerReferences: {{APIVersion: "pxc.percona.com/v1", Kind: "PerconaXtraDBCluster", Name: "monitoring", UID: "29529a89-5445-4434-a709-4031ee1a61a0", ...}}, OwnerReferences: nil, Path: "/local/Status", "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e", "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0", + "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0", + "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6MCwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBw"..., + "percona.com/last-config-hash": 
"eyJyZXBsaWNhcyI6MCwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3Vi"..., - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBw"..., + "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsImxhc3QtYXBwbGllZC1zZWNyZXQiOiI0MjQ3ZWFhZWU1NWE0NzViN2VmMGUxNWNjODgwMmViNjQwZGRhNzBmNGE3NDUxNzE2MjkxYjMzYzA0OWQ2MTRlIiwicGVyY29uYS5jb20vY29uZmlndXJhdGlvbi1oYXNoIjoi"..., - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFi"..., + "percona.com/last-config-hash": 
"eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFi"..., - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIn19LCJzcGVjIjp7InZvbHVtZXMiOlt7Im5hbWUiOiJoYXByb3h5LWN1c3RvbSIsImNvbmZpZ01h"..., - "percona.com/last-config-hash": 
"eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIn19LCJzcGVjIjp7InZvbHVtZXMiOlt7Im5hbWUiOiJoYXByb3h5LWN1c3RvbSIsImNvbmZpZ01hcCI6eyJuYW1lIjoibW9uaXRvcmluZy1oYXByb3h5Iiwib3B0aW9uYWwiOnRydWV9fSx7Im5hbWUiOiJoYXByb3h5LWF1dG8iLCJlbXB0eURpciI6e319LHsibmFtZSI6Im15c3FsLXVzZXJzLXNlY3JldC1maWxlIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJpbnRlcm5hbC1tb25pdG9yaW5nIiwib3B0aW9uYWwiOmZhbHNlfX0seyJuYW1lIjoibXktZW52LXZhci1zZWNyZXRzIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJteS1lbnYtdmFyLXNlY3JldHMiLCJvcHRpb25hbCI6dHJ1ZX19LHsibmFtZSI6ImJpbiIsImVtcHR5RGlyIjp7fX1dLCJpbml0Q29udGFpbmVycyI6W3sibmFtZSI6InB4Yy1pbml0IiwiaW1hZ2UiOiJwZXJjb25hbGFiL3BlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3I6UFItMjE1NC03YTYyM2IxMCIsImNvbW1hbmQiOlsiL3B4Yy1pbml0LWVudHJ5cG9pbnQuc2giXSwicmVzb3VyY2VzIjp7ImxpbWl0cyI6eyJjcHUiOiI1MG0iLCJtZW1vcnkiOiI1ME0ifX0sInZvbHVtZU1vdW50cyI6W3sibmFtZSI6ImJpbiIsIm1vdW50UGF0aCI6Ii92YXIvbGliL215c3FsIn1dLCJpbWFnZVB1bGxQb2xpY3kiOiJBbHdheXMifSx7Im5hbWUiOiJoYXByb3h5LWluaXQiLCJpbWFnZSI6InBlcmNvbmFsYWIvcGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvcjpQUi0yMTU0LTdhNjIzYjEwIiwiY29tbWFuZCI6WyIvaGFwcm94eS1pbml0LWVudHJ5cG9pbnQuc2giXSwicmVzb3VyY2VzIjp7ImxpbWl0cyI6eyJjcHUiOiI1MG0iLCJtZW1vcnkiOiI1ME0ifX0sInZvbHVtZU1vdW50cyI6W3sibmFtZSI6ImJpbiIsIm1vdW50UGF0aCI6Ii9vcHQvcGVyY29uYSJ9XSwiaW1hZ2VQdWxsUG9saWN5IjoiQWx3YXlzIn1dLCJjb250YWluZXJzIjpbeyJuYW1lIjoicG1tLWNsaWVudCIsImltYWdlIjoicGVyY29uYWxhYi9wbW0tY2xpZW50OmRldi1sYXRlc3QiLCJwb3J0cyI6W3siY29udGFpbmVyUG9ydCI6Nzc3N30seyJjb250YWluZXJQb3J0IjozMDEwMH0seyJjb250YWluZXJQb3J0IjozMDEwMX0seyJjb250YWluZXJQb3J0IjozMDEwMn0seyJjb250YWluZXJQb3J0IjozMDEwM30seyJjb250YWluZXJQb3J0IjozMDEwNH0seyJjb250YWluZXJQb3J0IjozMDEwNX1dLCJlbnZGcm9tIjpbeyJzZWNyZXRSZWYiOnsibmFtZSI6Im15LWVudi12YXItc2VjcmV0cyIsIm9wdGlvbmFsIjp0cnVlfX1dLCJlbnYiOlt7Im5hbWUiOiJQTU1fU0VSVkVSIiwidmFsdWUiOiJtb25pdG9yaW5nLXNlcnZpY2UifSx7Im5hbWUiOiJQTU1fVVNFUiIsInZhbHVlIjoiYWRtaW4ifSx7Im5hbWUiOiJQTU1fUEFTU1dPUkQiLCJ2YWx1ZUZyb20iOnsic2VjcmV0S2V5UmVmIjp7Im5hbWUiOiJpbnRlcm5hbC1tb25pdG9yaW5nIiwia2V5IjoicG1tc2VydmVyIn19fSx7Im5hbWUiOiJDTElFTlRfUE9SVF9MSVNURU4iLCJ2YWx1ZSI6Ijc3NzcifSx7Im5hbWUiOiJDTElFTlRfUE9SVF9NSU4iLCJ2YWx1ZSI6IjMwMTAwIn0seyJuYW1lIjoiQ0xJRU5UX1BPUlRfTUFYIiwidmFsdWUiOiIzMDEwNSJ9LHsibmFtZSI6IlBPRF9OQU1FIiwidmFsdWVGcm9tIjp7ImZpZWxkUmVmIjp7ImZpZWxkUGF0aCI6Im1ldGFkYXRhLm5hbWUifX19LHsibmFtZSI6IlBPRF9OQU1FU1BBU0UiLCJ2YWx1ZUZyb20iOnsiZmllbGRSZWYiOnsiZmllbGRQYXRoIjoibWV0YWRhdGEubmFtZXNwYWNlIn19fSx7Im5hbWUiOiJQTU1fQUdFTlRfU0VSVkVSX0FERFJFU1MiLCJ2YWx1ZSI6Im1vbml0b3Jpbmctc2VydmljZSJ9LHsibmFtZSI6IlBNTV9BR0VOVF9TRVJWRVJfVVNFUk5BTUUiLCJ2YWx1ZSI6ImFkbWluIn0seyJuYW1lIjoiUE1NX0FHRU5UX1NFUlZFUl9QQVNTV09SRCIsInZhbHVlRnJvbSI6eyJzZWNyZXRLZXlSZWYiOnsibmFtZSI6ImludGVybmFsLW1vbml0b3JpbmciLCJrZX
kiOiJwbW1zZXJ2ZXIifX19LHsibmFtZSI6IlBNTV9BR0VOVF9MSVNURU5fUE9SVCIsInZhbHVlIjoiNzc3NyJ9LHsibmFtZSI6IlBNTV9BR0VOVF9QT1JUU19NSU4iLCJ2YWx1ZSI6IjMwMTAwIn0seyJuYW1lIjoiUE1NX0FHRU5UX1BPUlRTX01BWCIsInZhbHVlIjoiMzAxMDUifSx7Im5hbWUiOiJQTU1fQUdFTlRfQ09ORklHX0ZJTEUiLCJ2YWx1ZSI6Ii91c3IvbG9jYWwvcGVyY29uYS9wbW0yL2NvbmZpZy9wbW0tYWdlbnQueWFtbCJ9LHsibmFtZSI6IlBNTV9BR0VOVF9TRVJWRVJfSU5TRUNVUkVfVExTIiwidmFsdWUiOiIxIn0seyJuYW1lIjoiUE1NX0FHRU5UX0xJU1RFTl9BRERSRVNTIiwidmFsdWUiOiIwLjAuMC4wIn0seyJuYW1lIjoiUE1NX0FHRU5UX1NFVFVQX01FVFJJQ1NfTU9ERSIsInZhbHVlIjoicHVz"..., + "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIn19LCJzcGVjIjp7InZvbHVtZXMiOlt7Im5hbWUiOiJoYXByb3h5LWN1c3RvbSIsImNvbmZpZ01hcCI6eyJuYW1lIjoibW9uaXRvcmluZy1oYXByb3h5Iiwib3B0aW9uYWwiOnRydWV9fSx7Im5hbWUiOiJoYXByb3h5LWF1dG8iLCJlbXB0eURpciI6e319LHsibmFtZSI6Im15c3FsLXVzZXJzLXNlY3JldC1maWxlIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJpbnRlcm5hbC1tb25pdG9yaW5nIiwib3B0aW9uYWwiOmZhbHNlfX0seyJuYW1lIjoibXktZW52LXZhci1zZWNyZXRzIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJteS1lbnYtdmFyLXNlY3JldHMiLCJvcHRpb25hbCI6dHJ1ZX19LHsibmFtZSI6ImJpbiIsImVtcHR5RGlyIjp7fX1dLCJpbml0Q29udGFpbmVycyI6W3sibmFtZSI6InB4Yy1pbml0IiwiaW1hZ2UiOiJwZXJjb25hbGFiL3BlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3I6UFItMjE1NC03YTYyM2IxMCIsImNvbW1hbmQiOlsiL3B4Yy1pbml0LWVudHJ5cG9pbnQuc2giXSwicmVzb3VyY2VzIjp7ImxpbWl0cyI6eyJjcHUiOiI1MG0iLCJtZW1vcnkiOiI1ME0ifX0sInZvbHVtZU1vdW50cyI6W3sibmFtZSI6ImJpbiIsIm1vdW50UGF0aCI6Ii92YXIvbGliL215c3FsIn1dLCJpbWFnZVB1bGxQb2xpY3kiOiJBbHdheXMifSx7Im5hbWUiOiJoYXByb3h5LWluaXQiLCJpbWFnZSI6InBlcmNvbmFsYWIvcGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvcjpQUi0yMTU0LTdhNjIzYjEwIiwiY29tbWFuZCI6WyIvaGFwcm94eS1pbml0LWVudHJ5cG9pbnQuc2giXSwicmVzb3VyY2VzIjp7ImxpbWl0cyI6eyJjcHUiOiI1MG0iLCJtZW1vcnkiOiI1ME0ifX0sInZvbHVtZU1vdW50cyI6W3sibmFtZSI6ImJpbiIsIm1vdW50UGF0aCI6Ii9vcHQvcGVyY29uYSJ9XSwiaW1hZ2VQdWxsUG9saWN5IjoiQWx3YXlzIn1dLCJjb250YWluZXJzIjpbeyJuYW1lIjoicG1tLWNsaWVudCIsImltYWdlIjoicGVyY29uYWxhYi9wbW0tY2xpZW50OmRldi1sYXRlc3QiLCJwb3J0cyI6W3siY29udGFpbmVyUG9ydCI6Nzc3N30seyJjb250YWluZXJQb3J0IjozMDEwMH0seyJjb250YWluZXJQb3J0IjozMDEwMX0seyJjb250YWluZXJQb3J0IjozMDEwMn0seyJjb250YWluZXJQb3J0IjozMDEwM30seyJjb250YWluZXJQb3J0IjozMDEwNH0seyJjb250YWluZXJQb3J0IjozMDEwNX1dLCJlbnZGcm9tIjpbeyJzZWNyZXRSZWYiOnsibmFtZSI6Im15LWVudi12YXItc2VjcmV0cyIsIm9wdGlvbmFsIjp0cnVlfX1dLCJlbnYiOlt7Im5hbWUiOiJQTU1fU0VSVkVSIiwidmFsdWUiOiJtb25pdG9yaW5nLXNlcnZpY2UifSx7Im5hbWUiOiJQTU1fVVNFUiIsInZhbHVlIjoiYXBpX2tleSJ9LHsibmFtZSI6IlBNTV9QQVNTV09SRCIsInZhbHVlRnJvbSI6eyJzZWNyZXRLZXlSZWYiOnsibmFtZSI6ImludGVybmFsLW1vbml0b3JpbmciLCJrZXkiOiJwbW1zZXJ2ZXJrZXkifX19LHsibmFtZSI6IkNMSUVOVF9QT1JUX0xJU1RFTiIsInZhbHVlIjoiNzc3NyJ9LHsibmFtZSI6IkNMSUVOVF9QT1JU
X01JTiIsInZhbHVlIjoiMzAxMDAifSx7Im5hbWUiOiJDTElFTlRfUE9SVF9NQVgiLCJ2YWx1ZSI6IjMwMTA1In0seyJuYW1lIjoiUE9EX05BTUUiLCJ2YWx1ZUZyb20iOnsiZmllbGRSZWYiOnsiZmllbGRQYXRoIjoibWV0YWRhdGEubmFtZSJ9fX0seyJuYW1lIjoiUE9EX05BTUVTUEFTRSIsInZhbHVlRnJvbSI6eyJmaWVsZFJlZiI6eyJmaWVsZFBhdGgiOiJtZXRhZGF0YS5uYW1lc3BhY2UifX19LHsibmFtZSI6IlBNTV9BR0VOVF9TRVJWRVJfQUREUkVTUyIsInZhbHVlIjoibW9uaXRvcmluZy1zZXJ2aWNlIn0seyJuYW1lIjoiUE1NX0FHRU5UX1NFUlZFUl9VU0VSTkFNRSIsInZhbHVlIjoiYXBpX2tleSJ9LHsibmFtZSI6IlBNTV9BR0VOVF9TRVJWRVJfUEFTU1dPUkQiLCJ2YWx1ZUZyb20iOnsic2VjcmV0S2V5UmVmIjp7Im5hbWUiOiJpbnRlcm5hbC1tb25pdG9yaW5nIiwia2V5IjoicG1tc2VydmVya2V5In19fSx7Im5hbWUiOiJQTU1fQUdFTlRfTElTVEVOX1BPUlQiLCJ2YWx1ZSI6Ijc3NzcifSx7Im5hbWUiOiJQTU1fQUdFTlRfUE9SVFNfTUlOIiwidmFsdWUiOiIzMDEwMCJ9LHsibmFtZSI6IlBNTV9BR0VOVF9QT1JUU19NQVgiLCJ2YWx1ZSI6IjMwMTA1In0seyJuYW1lIjoiUE1NX0FHRU5UX0NPTkZJR19GSUxFIiwidmFsdWUiOiIvdXNyL2xvY2FsL3BlcmNvbmEvcG1tMi9jb25maWcvcG1tLWFnZW50LnlhbWwifSx7Im5hbWUiOiJQTU1fQUdFTlRfU0VSVkVSX0lOU0VDVVJFX1RMUyIsInZhbHVlIjoiMSJ9LHsibmFtZSI6IlBNTV9BR0VOVF9MSVNURU5fQUREUkVTUyIsInZhbHVlIjoiMC4wLjAuMCJ9LHsibmFtZSI6IlBNTV9BR0VOVF9TRVRVUF9NRVRSSUNTX01PREUiLCJ2"..., - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3Vi"..., + "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsImxhc3QtYXBwbGllZC1zZWNyZXQiOiI0MjQ3ZWFhZWU1NWE0NzViN2VmMGUxNWNjODgwMmViNjQwZGRhNzBmNGE3NDUxNzE2MjkxYjMzYzA0OWQ2MTRlIiwicGVyY29uYS5jb20vY29uZmlndXJhdGlvbi1oYXNoIjoiZDQxZDhjZDk4ZjAw"..., - "percona.com/last-config-hash": 
"eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vc3NsLWhhc2giOiJhNzU0NDMzMTRhNzNhNjE5ODdjNGFiY2Y3MjVlNThkYiIsInBlcmNvbmEu"..., + "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vc3NsLWhhc2giOiJhNzU0NDMzMTRhNzNhNjE5ODdjNGFiY2Y3MjVlNThkYiIsInBlcmNvbmEuY29tL3NzbC1pbnRlcm5hbC1oYXNoIjoiNWI3MGUxNTQzMDZjMmFlYzY0OGUyNDY2MzMzMTk1NTYifX0sInNwZWMiOnsidm9sdW1lcyI6W3sibmFtZSI6InRtcCIsImVtcHR5RGlyIjp7fX0seyJuYW1lIjoiY29uZmlnIiwiY29uZmlnTWFwIjp7Im5hbWUiOiJtb25pdG9yaW5nLXB4YyIsIm9wdGlvbmFsIjp0cnVlfX0seyJuYW1lIjoic3NsLWludGVybmFsIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJtb25pdG9yaW5nLXNzbC1pbnRlcm5hbCIsIm9wdGlvbmFsIjp0cnVlfX0seyJuYW1lIjoic3NsIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJzb21lLW5hbWUtc3NsIiwib3B0aW9uYWwiOmZhbHNlfX0seyJuYW1lIjoiYXV0by1jb25maWciLCJjb25maWdNYXAiOnsibmFtZSI6ImF1dG8tbW9uaXRvcmluZy1weGMiLCJvcHRpb25hbCI6dHJ1ZX19LHsibmFtZSI6InZhdWx0LWtleXJpbmctc2VjcmV0Iiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJtb25pdG9yaW5nLXZhdWx0Iiwib3B0aW9uYWwiOnRydWV9fSx7Im5hbWUiOiJteXNxbC11c2Vycy1zZWNyZXQtZmlsZSIsInNlY3JldCI6eyJzZWNyZXROYW1lIjoiaW50ZXJuYWwtbW9uaXRvcmluZyIsIm9wdGlvbmFsIjpmYWxzZX19LHsibmFtZSI6Im15c3FsLWluaXQtZmlsZSIsInNlY3JldCI6eyJzZWNyZXROYW1lIjoibW9uaXRvcmluZy1teXNxbC1pbml0Iiwib3B0aW9uYWwiOnRydWV9fV0sImluaXRDb250YWluZXJzIjpbeyJuYW1lIjoicHhjLWluaXQiLCJpbWFnZSI6InBlcmNvbmFsYWIvcGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvcjpQUi0yMTU0LTdhNjIzYjEwIiwiY29tbWFuZCI6WyIvcHhjLWluaXQtZW50cnlwb2ludC5zaCJdLCJyZXNvdXJjZXMiOnsibGltaXRzIjp7ImNwdSI6IjUwbSIsIm1lbW9yeSI6IjUwTSJ9fSwidm9sdW1lTW91bnRzIjpbeyJuYW1lIjoiZGF0YWRpciIsIm1vdW50UGF0aCI6Ii92YXIvbGliL215c3FsIn1dLCJpbWFnZVB1bGxQb2xpY3kiOiJBbHdheXMifV0sImNvbnRhaW5lcnMiOlt7Im5hbWUiOiJwbW0tY2xpZW50IiwiaW1hZ2UiOiJwZXJjb25hbGFiL3BtbS1jbGllbnQ6ZGV2LWxhdGVzdCIsInBvcnRzIjpbeyJjb250YWluZXJQb3J0I
jo3Nzc3fSx7ImNvbnRhaW5lclBvcnQiOjMwMTAwfSx7ImNvbnRhaW5lclBvcnQiOjMwMTAxfSx7ImNvbnRhaW5lclBvcnQiOjMwMTAyfSx7ImNvbnRhaW5lclBvcnQiOjMwMTAzfSx7ImNvbnRhaW5lclBvcnQiOjMwMTA0fSx7ImNvbnRhaW5lclBvcnQiOjMwMTA1fV0sImVudkZyb20iOlt7InNlY3JldFJlZiI6eyJuYW1lIjoibXktZW52LXZhci1zZWNyZXRzIiwib3B0aW9uYWwiOnRydWV9fV0sImVudiI6W3sibmFtZSI6IlBNTV9TRVJWRVIiLCJ2YWx1ZSI6Im1vbml0b3Jpbmctc2VydmljZSJ9LHsibmFtZSI6IlBNTV9VU0VSIiwidmFsdWUiOiJhcGlfa2V5In0seyJuYW1lIjoiUE1NX1BBU1NXT1JEIiwidmFsdWVGcm9tIjp7InNlY3JldEtleVJlZiI6eyJuYW1lIjoiaW50ZXJuYWwtbW9uaXRvcmluZyIsImtleSI6InBtbXNlcnZlcmtleSJ9fX0seyJuYW1lIjoiQ0xJRU5UX1BPUlRfTElTVEVOIiwidmFsdWUiOiI3Nzc3In0seyJuYW1lIjoiQ0xJRU5UX1BPUlRfTUlOIiwidmFsdWUiOiIzMDEwMCJ9LHsibmFtZSI6IkNMSUVOVF9QT1JUX01BWCIsInZhbHVlIjoiMzAxMDUifSx7Im5hbWUiOiJQT0RfTkFNRSIsInZhbHVlRnJvbSI6eyJmaWVsZFJlZiI6eyJmaWVsZFBhdGgiOiJtZXRhZGF0YS5uYW1lIn19fSx7Im5hbWUiOiJQT0RfTkFNRVNQQVNFIiwidmFsdWVGcm9tIjp7ImZpZWxkUmVmIjp7ImZpZWxkUGF0aCI6Im1ldGFkYXRhLm5hbWVzcGFjZSJ9fX0seyJuYW1lIjoiUE1NX0FHRU5UX1NFUlZFUl9BRERSRVNTIiwidmFsdWUiOiJtb25pdG9yaW5nLXNlcnZpY2UifSx7Im5hbWUiOiJQTU1fQUdFTlRfU0VSVkVSX1VTRVJOQU1FIiwidmFsdWUiOiJhcGlfa2V5In0seyJuYW1lIjoiUE1NX0FHRU5UX1NFUlZFUl9QQVNTV09SRCIsInZhbHVlRnJvbSI6eyJzZWNyZXRLZXlSZWYiOnsibmFtZSI6ImludGVybmFsLW1vbml0b3JpbmciLCJrZXkiOiJwbW1zZXJ2ZXJrZXkifX19LHsibmFtZSI6IlBNTV9BR0VOVF9MSVNURU5fUE9SVCIsInZhbHVlIjoiNzc3NyJ9LHsibmFtZSI6IlBNTV9BR0VOVF9QT1JUU19NSU4iLCJ2YWx1ZSI6IjMwMTAwIn0seyJuYW1lIjoiUE1NX0FHRU5UX1BPUlRTX01BWCIsInZhbHVlIjoiMzAxMDUifSx7Im5hbWUiOiJQTU1fQUdFTlRfQ09ORklHX0ZJTEUiLCJ2YWx1ZSI6Ii91c3IvbG9jYWwvcGVyY29uYS9wbW0yL2NvbmZpZy9wbW0tYWdlbnQueWFtbCJ9LHsibmFtZSI6IlBNTV9BR0VOVF9T"..., - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vc3NsLWhhc2giOiJhNzU0NDMzMTRhNzNhNjE5ODdjNGFiY2Y3MjVlNThkYiIsInBlcmNvbmEuY29tL3NzbC1pbnRlcm5hbC1oYXNoIjoiNWI3MGUxNTQzMDZjMmFlYzY0OGUyNDY2MzMzMTk1NTYifX0sInNwZWMiOnsidm9sdW1lcyI6W3sibmFtZSI6InRtcCIsImVtcHR5RGlyIjp7fX0seyJuYW1lIjoiY29uZmlnIiwiY29uZmlnTWFwIjp7Im5hbWUiOiJtb25pdG9yaW5nLXB4YyIsIm9wdGlvbmFsIjp0cnVlfX0seyJuYW1lIjoic3NsLWludGVybmFsIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJtb25pdG9yaW5nLXNzbC1pbnRlcm5hbCIsIm9wdGlvbmFsIjp0cnVlfX0seyJuYW1lIjoic3NsIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJzb21lLW5hbWUtc3NsIiwib3B0aW9uYWwiOmZhbHNlfX0seyJuYW1lIjoiYXV0by1jb25maWciLCJjb25maWdNYXAiOnsibmFtZSI6ImF1dG8tbW9uaXRvcmluZy1weGMiLCJvcHRpb25hbCI6dHJ1ZX19LHsibmFtZSI6InZhdWx0LWtleXJpbmctc2VjcmV0Iiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJtb25pdG9yaW5nLXZhdWx0Iiwib3B0aW9uYWwiOnRydWV9fSx7Im5hbWUiOiJteXNxbC11c2Vycy1zZWNyZXQtZmlsZSIsInNlY3JldCI6eyJzZWNyZXROYW1lIjoiaW50ZXJuYWwtbW9uaXRvcmluZyIsIm9wdGlvbmFsIjpmYWxzZX19LHsibmFtZSI6Im15c3FsLWluaXQtZmlsZSIsInNlY3JldCI6eyJzZWNyZXROYW1lIjo
ibW9uaXRvcmluZy1teXNxbC1pbml0Iiwib3B0aW9uYWwiOnRydWV9fV0sImluaXRDb250YWluZXJzIjpbeyJuYW1lIjoicHhjLWluaXQiLCJpbWFnZSI6InBlcmNvbmFsYWIvcGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvcjpQUi0yMTU0LTdhNjIzYjEwIiwiY29tbWFuZCI6WyIvcHhjLWluaXQtZW50cnlwb2ludC5zaCJdLCJyZXNvdXJjZXMiOnsibGltaXRzIjp7ImNwdSI6IjUwbSIsIm1lbW9yeSI6IjUwTSJ9fSwidm9sdW1lTW91bnRzIjpbeyJuYW1lIjoiZGF0YWRpciIsIm1vdW50UGF0aCI6Ii92YXIvbGliL215c3FsIn1dLCJpbWFnZVB1bGxQb2xpY3kiOiJBbHdheXMifV0sImNvbnRhaW5lcnMiOlt7Im5hbWUiOiJwbW0tY2xpZW50IiwiaW1hZ2UiOiJwZXJjb25hbGFiL3BtbS1jbGllbnQ6ZGV2LWxhdGVzdCIsInBvcnRzIjpbeyJjb250YWluZXJQb3J0Ijo3Nzc3fSx7ImNvbnRhaW5lclBvcnQiOjMwMTAwfSx7ImNvbnRhaW5lclBvcnQiOjMwMTAxfSx7ImNvbnRhaW5lclBvcnQiOjMwMTAyfSx7ImNvbnRhaW5lclBvcnQiOjMwMTAzfSx7ImNvbnRhaW5lclBvcnQiOjMwMTA0fSx7ImNvbnRhaW5lclBvcnQiOjMwMTA1fV0sImVudkZyb20iOlt7InNlY3JldFJlZiI6eyJuYW1lIjoibXktZW52LXZhci1zZWNyZXRzIiwib3B0aW9uYWwiOnRydWV9fV0sImVudiI6W3sibmFtZSI6IlBNTV9TRVJWRVIiLCJ2YWx1ZSI6Im1vbml0b3Jpbmctc2VydmljZSJ9LHsibmFtZSI6IlBNTV9VU0VSIiwidmFsdWUiOiJhZG1pbiJ9LHsibmFtZSI6IlBNTV9QQVNTV09SRCIsInZhbHVlRnJvbSI6eyJzZWNyZXRLZXlSZWYiOnsibmFtZSI6ImludGVybmFsLW1vbml0b3JpbmciLCJrZXkiOiJwbW1zZXJ2ZXIifX19LHsibmFtZSI6IkNMSUVOVF9QT1JUX0xJU1RFTiIsInZhbHVlIjoiNzc3NyJ9LHsibmFtZSI6IkNMSUVOVF9QT1JUX01JTiIsInZhbHVlIjoiMzAxMDAifSx7Im5hbWUiOiJDTElFTlRfUE9SVF9NQVgiLCJ2YWx1ZSI6IjMwMTA1In0seyJuYW1lIjoiUE9EX05BTUUiLCJ2YWx1ZUZyb20iOnsiZmllbGRSZWYiOnsiZmllbGRQYXRoIjoibWV0YWRhdGEubmFtZSJ9fX0seyJuYW1lIjoiUE9EX05BTUVTUEFTRSIsInZhbHVlRnJvbSI6eyJmaWVsZFJlZiI6eyJmaWVsZFBhdGgiOiJtZXRhZGF0YS5uYW1lc3BhY2UifX19LHsibmFtZSI6IlBNTV9BR0VOVF9TRVJWRVJfQUREUkVTUyIsInZhbHVlIjoibW9uaXRvcmluZy1zZXJ2aWNlIn0seyJuYW1lIjoiUE1NX0FHRU5UX1NFUlZFUl9VU0VSTkFNRSIsInZhbHVlIjoiYWRtaW4ifSx7Im5hbWUiOiJQTU1fQUdFTlRfU0VSVkVSX1BBU1NXT1JEIiwidmFsdWVGcm9tIjp7InNlY3JldEtleVJlZiI6eyJuYW1lIjoiaW50ZXJuYWwtbW9uaXRvcmluZyIsImtleSI6InBtbXNlcnZlciJ9fX0seyJuYW1lIjoiUE1NX0FHRU5UX0xJU1RFTl9QT1JUIiwidmFsdWUiOiI3Nzc3In0seyJuYW1lIjoiUE1NX0FHRU5UX1BPUlRTX01JTiIsInZhbHVlIjoiMzAxMDAifSx7Im5hbWUiOiJQTU1fQUdFTlRfUE9SVFNfTUFYIiwidmFsdWUiOiIzMDEwNSJ9LHsibmFtZSI6IlBNTV9BR0VOVF9DT05GSUdfRklMRSIsInZhbHVlIjoiL3Vzci9sb2NhbC9wZXJjb25hL3BtbTIvY29uZmlnL3BtbS1hZ2VudC55YW1sIn0seyJuYW1lIjoiUE1NX0FHRU5UX1NFUlZFUl9JTlNF"..., - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFiYzcwNGYwMmU3NDg3"..., + "percona.com/last-config-hash": 
"eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFiYzcwNGYwMmU3NDg3"..., "percona.com/ssl-hash": "a75443314a73a61987c4abcf725e58db", "percona.com/ssl-internal-hash": "5b70e154306c2aec648e246633319556", + PeriodSeconds: 0, - PeriodSeconds: 10, + PersistentVolumeClaimRetentionPolicy: nil, - PersistentVolumeClaimRetentionPolicy: s"&StatefulSetPersistentVolumeClaimRetentionPolicy{WhenDeleted:Retain,WhenScaled:Retain,}", + Phase: "", - Phase: "Pending", + PodManagementPolicy: "", - PodManagementPolicy: "OrderedReady", Port: {IntVal: 7777}, Ports: []v1.ContainerPort{ PreemptionPolicy: nil, ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/liveness-check.sh"}}}, ProbeHandler: v1.ProbeHandler{ + Protocol: "", - Protocol: "TCP", Quobyte: nil, ReadinessProbe: &{ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/readiness-check.sh"}}}, InitialDelaySeconds: 15, TimeoutSeconds: 15, PeriodSeconds: 30, ...}, ReadinessProbe: &v1.Probe{ + ReadyReplicas: 0, - ReadyReplicas: 2, - ReadyReplicas: 3, + Replicas: 0, + Replicas: &0, Replicas: &2, - Replicas: 2, - Replicas: &2, Replicas: &3, - Replicas: 3, - Replicas: &3, ResizePolicy: nil, ResourceFieldRef: nil, Resources: {Limits: {s"cpu": {i: {...}, Format: "DecimalSI"}, s"memory": {i: {...}, Format: "DecimalSI"}}, Requests: {s"cpu": {i: {...}, s: "308m", Format: "DecimalSI"}, s"memory": {i: {...}, s: "508M", Format: "DecimalSI"}}}, + ResourceVersion: "", - ResourceVersion: "1763573514896543017", - ResourceVersion: "1763573637573967003", - ResourceVersion: "1763573879990447017", - ResourceVersion: "1763573979698831003", - ResourceVersion: "1763574039389103017", - ResourceVersion: "1763574139571247003", - ResourceVersion: "1763574187920895017", - ResourceVersion: "1763574295827727003", + RestartPolicy: "", - RestartPolicy: "Always", - RevisionHistoryLimit: &10, + RevisionHistoryLimit: nil, + SchedulerName: "", + SchedulerName: "", - SchedulerName: "default-scheduler", - SchedulerName: "default-scheduler", + Scheme: "", - Scheme: "HTTP", SecretKeyRef: &v1.SecretKeySelector{ SecretName: "internal-monitoring", SecretName: "monitoring-mysql-init", SecretName: "monitoring-ssl-internal", SecretName: "monitoring-vault", SecretName: "my-env-var-secrets", SecretName: "some-name-ssl", Secret: &v1.SecretVolumeSource{ SecurityContext: nil, + SecurityContext: nil, - SecurityContext: 
s"&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,AppArmorProfile:nil,SupplementalGroupsPolicy:nil,SELinux"..., Selector: &{MatchLabels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, Selector: &{MatchLabels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, SelfLink: "", ServiceAccountName: "default", ServiceName: "monitoring-haproxy", ServiceName: "monitoring-pxc", SetHostnameAsFQDN: nil, ShareProcessNamespace: nil, Spec: v1.PersistentVolumeClaimSpec{ Spec: v1.PodSpec{ Spec: v1.StatefulSetSpec{ StartupProbe: nil, Status: v1.PersistentVolumeClaimStatus{ Status: v1.StatefulSetStatus{ StorageClassName: nil, Subdomain: "", Subdomain: "", - Subresource: "status", SuccessThreshold: 1, TCPSocket: nil, Template: v1.PodTemplateSpec{ TerminationGracePeriodSeconds: &30, TerminationGracePeriodSeconds: &600, TerminationGracePeriodSeconds: nil, + TerminationMessagePath: "", - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: "", - TerminationMessagePolicy: "File", TimeoutSeconds: 15, TimeoutSeconds: 5, TimeoutSeconds: 5, - Time: s"2025-11-19 17:30:23 +0000 UTC", - Time: s"2025-11-19 17:31:54 +0000 UTC", - Time: s"2025-11-19 17:33:57 +0000 UTC", - Time: s"2025-11-19 17:37:08 +0000 UTC", - Time: s"2025-11-19 17:37:59 +0000 UTC", - Time: s"2025-11-19 17:39:39 +0000 UTC", - Time: s"2025-11-19 17:39:55 +0000 UTC", - Time: s"2025-11-19 17:40:39 +0000 UTC", - Time: s"2025-11-19 17:42:19 +0000 UTC", - Time: s"2025-11-19 17:42:23 +0000 UTC", - Time: s"2025-11-19 17:43:07 +0000 UTC", - Time: s"2025-11-19 17:44:55 +0000 UTC", Tolerations: nil, Tolerations: nil, - TopologySpreadConstraints: nil, + TopologySpreadConstraints: []v1.TopologySpreadConstraint{}, TypeMeta: {}, TypeMeta: {Kind: "StatefulSet", APIVersion: "apps/v1"}, + UID: "", - UID: "3392c899-e45c-4180-8bc2-3da20dda94e7", - UID: "59d5bace-c706-4a0c-8f53-f085c88d90a7", + UpdatedReplicas: 0, - UpdatedReplicas: 2, - UpdatedReplicas: 3, + UpdateRevision: "", - UpdateRevision: "monitoring-haproxy-64547b7577", - UpdateRevision: "monitoring-haproxy-67699f8fc9", - UpdateRevision: "monitoring-haproxy-6dbcd6fc7", - UpdateRevision: "monitoring-haproxy-f97cd5fc5", - UpdateRevision: "monitoring-pxc-5c4dcb8979", - UpdateRevision: "monitoring-pxc-64bb47876f", - UpdateRevision: "monitoring-pxc-65568f9d95", - UpdateRevision: "monitoring-pxc-c5d89d845", UpdateStrategy: {Type: "RollingUpdate", RollingUpdate: &{Partition: &0}}, &v1.StatefulSet{ Value: "", - Value: "admin", + Value: "api_key", ValueFrom: nil, ValueFrom: &v1.EnvVarSource{ + Value: "$(PMM_PREFIX)$(POD_NAMESPASE)-$(POD_NAME)", - Value: "$(POD_NAMESPASE)-$(POD_NAME)", VolumeAttributesClassName: nil, VolumeClaimTemplates: nil, VolumeClaimTemplates: []v1.PersistentVolumeClaim{ VolumeDevices: nil, - VolumeMode: &"Filesystem", + VolumeMode: nil, VolumeMounts: {{Name: "bin", MountPath: "/var/lib/mysql"}}, VolumeMounts: {{Name: "datadir", MountPath: "/var/lib/mysql"}}, VolumeMounts: {{Name: "datadir", MountPath: "/var/lib/mysql"}, {Name: "config", MountPath: "/etc/percona-xtradb-cluster.conf.d"}, {Name: "tmp", MountPath: 
"/tmp"}, {Name: "ssl", MountPath: "/etc/mysql/ssl"}, ...}, VolumeName: "", VolumeSource: v1.VolumeSource{ Volumes: []v1.Volume{ VsphereVolume: nil, WorkingDir: "", + grep -v NAMESPACE + kubectl get pxc --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n monitoring-2-0-15296 monitoring --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/monitoring patched (no change) + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.fh6cidFaU6 ++ mktemp + local LAST_ERR=/tmp/tmp.vmGwmEOFuM + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.fh6cidFaU6 perconaxtradbcluster.pxc.percona.com "monitoring" deleted from monitoring-2-0-15296 namespace + cat /tmp/tmp.vmGwmEOFuM + rm /tmp/tmp.fh6cidFaU6 /tmp/tmp.vmGwmEOFuM + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.tmIiuq2ys7 ++ mktemp + local LAST_ERR=/tmp/tmp.3OlMJWy8wM + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.tmIiuq2ys7 No resources found + cat /tmp/tmp.3OlMJWy8wM + rm /tmp/tmp.tmIiuq2ys7 /tmp/tmp.3OlMJWy8wM + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.l7VprgdXU2 ++ mktemp + local LAST_ERR=/tmp/tmp.8arXXmxKxL + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.l7VprgdXU2 No resources found + cat /tmp/tmp.8arXXmxKxL + rm /tmp/tmp.l7VprgdXU2 /tmp/tmp.8arXXmxKxL + return 0 + kubectl_bin delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook ++ mktemp + local LAST_OUT=/tmp/tmp.r0EX3NT7V4 ++ mktemp + local LAST_ERR=/tmp/tmp.jbRcyehVre + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.r0EX3NT7V4 validatingwebhookconfiguration.admissionregistration.k8s.io "percona-xtradbcluster-webhook" deleted + cat /tmp/tmp.jbRcyehVre + rm /tmp/tmp.r0EX3NT7V4 /tmp/tmp.jbRcyehVre + return 0 + kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v1.18.2/cert-manager.yaml + : + '[' '!' -z '' ']' + '[' -n pxc-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace monitoring-2-0-15296 + rm -rf /tmp/tmp.HXYCfGXVn6 + kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator ++ mktemp ++ mktemp + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.waulVWSXm1 + local LAST_OUT=/tmp/tmp.SIVxcMCysh ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.ZLxasBz59b + local exit_status=0 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.gdctVVCuV4 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace pxc-operator + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace monitoring-2-0-15296