Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/logs/monitoring-2-0-8-0.log Warning: version difference between client (1.36) and server (1.33) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.36) and server (1.33) exceeds the supported minor version skew of +/-1 + cluster=monitoring + create_infra monitoring-2-0-25359 + local ns=monitoring-2-0-25359 + '[' -n pxc-operator ']' + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl get pxc --all-namespaces -o wide + kubectl patch pxc -n monitoring-2-0-9137 monitoring --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/monitoring patched (no change) + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.4aQCkW20rz ++ mktemp + local LAST_ERR=/tmp/tmp.nP6WC83H0j + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4aQCkW20rz perconaxtradbcluster.pxc.percona.com "monitoring" deleted from monitoring-2-0-9137 namespace + cat /tmp/tmp.nP6WC83H0j + rm /tmp/tmp.4aQCkW20rz /tmp/tmp.nP6WC83H0j + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.9M6gJtgxRk ++ mktemp + local LAST_ERR=/tmp/tmp.VUhCkUH9KZ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.9M6gJtgxRk No resources found + cat /tmp/tmp.VUhCkUH9KZ + rm /tmp/tmp.9M6gJtgxRk /tmp/tmp.VUhCkUH9KZ + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.SHsiaUVIjx ++ mktemp + local LAST_ERR=/tmp/tmp.IgWDXKheyK + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.SHsiaUVIjx No resources found + cat /tmp/tmp.IgWDXKheyK + rm /tmp/tmp.SHsiaUVIjx /tmp/tmp.IgWDXKheyK + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' ++ tail -n1 + local chaos_mesh_ns= + '[' -n '' ']' ++ grep chaos-mesh ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl api-resources ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get clusterrolebinding + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep 
chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator + xargs kubectl delete ns + awk '{print$1}' + kubectl_bin get ns ++ mktemp + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.D44axSYxWC + local LAST_OUT=/tmp/tmp.xIdXQGVY2P ++ mktemp + local LAST_ERR=/tmp/tmp.m24Vyout8l + local exit_status=0 ++ seq 0 2 ++ mktemp + for i in '$(seq 0 2)' + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.kYMIrS2QJ9 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.D44axSYxWC + cat /tmp/tmp.m24Vyout8l + rm /tmp/tmp.D44axSYxWC /tmp/tmp.m24Vyout8l + return 0 namespace "monitoring-2-0-9137" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.xIdXQGVY2P namespace "pxc-operator" deleted + cat /tmp/tmp.kYMIrS2QJ9 + rm /tmp/tmp.xIdXQGVY2P /tmp/tmp.kYMIrS2QJ9 + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.VDyglcXKqV ++ mktemp + local LAST_ERR=/tmp/tmp.oDoSAIdD11 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.VDyglcXKqV namespace/pxc-operator created + cat /tmp/tmp.oDoSAIdD11 + rm /tmp/tmp.VDyglcXKqV /tmp/tmp.oDoSAIdD11 + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.z1Mh7vDFQ4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.sj2GZrSJBq ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.z1Mh7vDFQ4 ++ cat /tmp/tmp.sj2GZrSJBq ++ rm /tmp/tmp.z1Mh7vDFQ4 /tmp/tmp.sj2GZrSJBq ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2467-3dc7f023-16-cluster6 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.nn7TsZyBO9 ++ mktemp + local LAST_ERR=/tmp/tmp.nutHVJSsZ6 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2467-3dc7f023-16-cluster6 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.nn7TsZyBO9 Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2467-3dc7f023-16-cluster6" 
modified. + cat /tmp/tmp.nutHVJSsZ6 + rm /tmp/tmp.nn7TsZyBO9 /tmp/tmp.nutHVJSsZ6 + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.6HeuEHpyix ++ mktemp + local LAST_ERR=/tmp/tmp.9Yy4r3LmUm + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.6HeuEHpyix customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.9Yy4r3LmUm + rm /tmp/tmp.6HeuEHpyix /tmp/tmp.9Yy4r3LmUm + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + kubectl_bin apply -f - + sed -e 's^namespace: .*^namespace: pxc-operator^' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/deploy/cw-rbac.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.TAdpfE1Bv0 ++ mktemp + local LAST_ERR=/tmp/tmp.hr3LB8kExD + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.TAdpfE1Bv0 clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.hr3LB8kExD + rm /tmp/tmp.TAdpfE1Bv0 /tmp/tmp.hr3LB8kExD + return 0 + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2467-3dc7f023^' + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "PXCO_FEATURE_GATES").value) = ""' - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/deploy/cw-operator.yaml + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.vaa8G6nUuJ ++ mktemp + local LAST_ERR=/tmp/tmp.DDkxLjrkjo + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.vaa8G6nUuJ deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.DDkxLjrkjo + rm /tmp/tmp.vaa8G6nUuJ /tmp/tmp.DDkxLjrkjo + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ 
mktemp + local LAST_OUT=/tmp/tmp.nycKYOenRd ++ mktemp + local LAST_ERR=/tmp/tmp.SqZjHmLfHe + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.nycKYOenRd pod/percona-xtradb-cluster-operator-9d9fbdb5-4wm8r condition met + cat /tmp/tmp.SqZjHmLfHe E0516 19:55:35.658690 10123 reflector.go:227] "Failed to watch" err="Get \"https://34.56.22.232/api/v1/namespaces/pxc-operator/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dpercona-xtradb-cluster-operator-9d9fbdb5-4wm8r&resourceVersion=1778961335295023000&timeoutSeconds=477&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" + rm /tmp/tmp.nycKYOenRd /tmp/tmp.SqZjHmLfHe + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name' ++ head -1 +++ mktemp ++ local LAST_OUT=/tmp/tmp.RSlLkLcvom +++ mktemp ++ local LAST_ERR=/tmp/tmp.a1gflKOdQ9 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.RSlLkLcvom ++ cat /tmp/tmp.a1gflKOdQ9 ++ rm /tmp/tmp.RSlLkLcvom /tmp/tmp.a1gflKOdQ9 ++ return 0 + wait_pod percona-xtradb-cluster-operator-9d9fbdb5-4wm8r 480 pxc-operator + local pod=percona-xtradb-cluster-operator-9d9fbdb5-4wm8r + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-9d9fbdb5-4wm8r ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-9d9fbdb5-4wm8r condition met E0516 19:55:40.190814 10767 reflector.go:227] "Failed to watch" err="Get \"https://34.56.22.232/api/v1/namespaces/pxc-operator/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dpercona-xtradb-cluster-operator-9d9fbdb5-4wm8r&resourceVersion=1778961338621814000&timeoutSeconds=448&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" waiting for pod/percona-xtradb-cluster-operator-9d9fbdb5-4wm8r to become Ready.Ok + sleep 3 + create_namespace monitoring-2-0-25359 + local namespace=monitoring-2-0-25359 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ sed s/NAMESPACE// ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ tail -n1 + local chaos_mesh_ns= + '[' -n '' ']' ++ awk '{print $1}' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh + timeout 30 kubectl delete 
ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl api-resources ++ grep chaos-mesh ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get clusterrolebinding + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces monitoring-2-0-25359' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces monitoring-2-0-25359 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace monitoring-2-0-25359 + awk '{print$1}' ++ mktemp + kubectl_bin get ns + local LAST_OUT=/tmp/tmp.tPShRbz4QU + xargs kubectl delete ns + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.iUXM5r1ldv ++ mktemp + local LAST_ERR=/tmp/tmp.sO6esNA82s + local exit_status=0 ++ mktemp ++ seq 0 2 + local LAST_ERR=/tmp/tmp.S8cfvbFWI4 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-2-0-25359 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.iUXM5r1ldv + cat /tmp/tmp.S8cfvbFWI4 + rm /tmp/tmp.iUXM5r1ldv /tmp/tmp.S8cfvbFWI4 + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-2-0-25359 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-2-0-25359 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.tPShRbz4QU + cat /tmp/tmp.sO6esNA82s Error from server (NotFound): namespaces "monitoring-2-0-25359" not found + rm /tmp/tmp.tPShRbz4QU /tmp/tmp.sO6esNA82s + return 1 + : + wait_for_delete namespace/monitoring-2-0-25359 + local res=namespace/monitoring-2-0-25359 + echo -n 'waiting for namespace/monitoring-2-0-25359 to be deleted' waiting for namespace/monitoring-2-0-25359 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "monitoring-2-0-25359" not found + desc 'create namespace monitoring-2-0-25359' + set +o xtrace ----------------------------------------------------------------------------------- create namespace monitoring-2-0-25359 ----------------------------------------------------------------------------------- + kubectl_bin create namespace monitoring-2-0-25359 ++ mktemp + local LAST_OUT=/tmp/tmp.8aIXEMoEvB ++ mktemp + local LAST_ERR=/tmp/tmp.qSWPQvNnpe + local exit_status=0 ++ 
seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace monitoring-2-0-25359 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.8aIXEMoEvB namespace/monitoring-2-0-25359 created + cat /tmp/tmp.qSWPQvNnpe + rm /tmp/tmp.8aIXEMoEvB /tmp/tmp.qSWPQvNnpe + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.WHc7pJjwNI +++ mktemp ++ local LAST_ERR=/tmp/tmp.8PtRF4BwXI ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.WHc7pJjwNI ++ cat /tmp/tmp.8PtRF4BwXI ++ rm /tmp/tmp.WHc7pJjwNI /tmp/tmp.8PtRF4BwXI ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2467-3dc7f023-16-cluster6 --namespace=monitoring-2-0-25359 ++ mktemp + local LAST_OUT=/tmp/tmp.jLhH4MsIo4 ++ mktemp + local LAST_ERR=/tmp/tmp.pWhHiU60v7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2467-3dc7f023-16-cluster6 --namespace=monitoring-2-0-25359 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.jLhH4MsIo4 Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2467-3dc7f023-16-cluster6" modified. + cat /tmp/tmp.pWhHiU60v7 + rm /tmp/tmp.jLhH4MsIo4 /tmp/tmp.pWhHiU60v7 + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.A3czHSGRwR ++ mktemp + local LAST_ERR=/tmp/tmp.ZSeTDbMhB6 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.A3czHSGRwR secret/minio-secret created secret/aws-s3-secret created secret/do-spaces-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.ZSeTDbMhB6 + rm /tmp/tmp.A3czHSGRwR /tmp/tmp.ZSeTDbMhB6 + return 0 + deploy_helm monitoring-2-0-25359 + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" already exists with the same configuration, skipping + helm repo add minio https://charts.min.io/ "minio" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "chaos-mesh" chart repository ...Successfully got an update from the "hashicorp" chart repository ...Successfully got an update from the "percona" chart repository Update Complete. ⎈Happy Helming!⎈ + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + deploy_pmm_server + '[' '!' 
-z '' ']' + helm repo add percona https://percona.github.io/percona-helm-charts/ "percona" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "chaos-mesh" chart repository ...Successfully got an update from the "hashicorp" chart repository ...Successfully got an update from the "percona" chart repository Update Complete. ⎈Happy Helming!⎈ + helm uninstall monitoring Error: uninstall: Release not loaded: monitoring: release: not found + : + retry 10 60 helm install monitoring --set imageRepo=perconalab/pmm-server --set imageTag=dev-latest https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz + local max=10 + local delay=60 + shift 2 + local n=1 + helm install monitoring --set imageRepo=perconalab/pmm-server --set imageTag=dev-latest https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz NAME: monitoring LAST DEPLOYED: Sat May 16 19:56:35 2026 NAMESPACE: monitoring-2-0-25359 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster: endpoint: https://monitoring-service.monitoring-2-0-25359.svc.cluster.local:443 login: admin password: admin + wait_for_pmm_service + timeout=420 ++ date +%s + start=1778961396 + grep -q . + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' ++ date +%s + (( 1778961398 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1778961401 - start >= timeout )) + sleep 2 + grep -q . + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' ++ date +%s + (( 1778961404 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1778961408 - start >= timeout )) + sleep 2 + grep -q . + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' ++ date +%s + (( 1778961411 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1778961414 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1778961418 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1778961421 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1778961424 - start >= timeout )) + sleep 2 + grep -q . + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' ++ date +%s + (( 1778961428 - start >= timeout )) + sleep 2 + grep -q . 
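The loop being traced at this point is wait_for_pmm_service, which watches the monitoring-service LoadBalancer until an ingress address is published. A minimal sketch of what the trace shows, using plain kubectl instead of the kubectl_bin wrapper (the exact timeout handling is an assumption):

wait_for_pmm_service() {
    local timeout=420                  # overall budget in seconds, as in the trace
    local start=$(date +%s)
    # Poll until .status.loadBalancer.ingress[0] of monitoring-service is non-empty.
    until kubectl get svc monitoring-service \
            -o 'jsonpath={.status.loadBalancer.ingress[0]}' | grep -q .; do
        if (( $(date +%s) - start >= timeout )); then
            echo "timed out waiting for the monitoring-service LoadBalancer" >&2
            return 1
        fi
        sleep 2
    done
}
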
+ kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + kubectl_bin wait sts/monitoring '--for=jsonpath={.status.readyReplicas}=1' --timeout=420s ++ mktemp + local LAST_OUT=/tmp/tmp.JsMgvznwmm ++ mktemp + local LAST_ERR=/tmp/tmp.rTWlbEWNzy + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait sts/monitoring '--for=jsonpath={.status.readyReplicas}=1' --timeout=420s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.JsMgvznwmm statefulset.apps/monitoring condition met + cat /tmp/tmp.rTWlbEWNzy + rm /tmp/tmp.JsMgvznwmm /tmp/tmp.rTWlbEWNzy + return 0 ++ kubectl_bin exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eoNs8H9IhX +++ mktemp ++ local LAST_ERR=/tmp/tmp.DfnAHLiu8J ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.eoNs8H9IhX ++ cat /tmp/tmp.DfnAHLiu8J ++ rm /tmp/tmp.eoNs8H9IhX /tmp/tmp.DfnAHLiu8J ++ return 0 + ADMIN_PASSWORD=admin + sleep 5 + kubectl_bin exec monitoring-0 -- bash -c 'grafana-cli --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini admin reset-admin-password admin' ++ mktemp + local LAST_OUT=/tmp/tmp.NZz5XFpUHg ++ mktemp + local LAST_ERR=/tmp/tmp.cs187BCVxg + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'grafana-cli --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini admin reset-admin-password admin' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.NZz5XFpUHg logger=settings t=2026-05-16T19:57:25.07869324Z level=info msg="Starting Grafana" version= commit= branch= compiled=1970-01-01T00:00:00Z logger=settings t=2026-05-16T19:57:25.07884486Z level=info msg="Config loaded from" file=/usr/share/grafana/conf/defaults.ini logger=settings t=2026-05-16T19:57:25.07885995Z level=info msg="Config loaded from" file=/etc/grafana/grafana.ini logger=settings t=2026-05-16T19:57:25.07886525Z level=info msg="Path Home" path=/usr/share/grafana logger=settings t=2026-05-16T19:57:25.0788697Z level=info msg="Path Data" path=/srv/grafana logger=settings t=2026-05-16T19:57:25.07887386Z level=info msg="Path Logs" path=/srv/logs logger=settings t=2026-05-16T19:57:25.07887796Z level=info msg="Path Plugins" path=/srv/grafana/plugins logger=settings t=2026-05-16T19:57:25.07888219Z level=info msg="Path Provisioning" path=/usr/share/grafana/conf/provisioning logger=settings t=2026-05-16T19:57:25.07889151Z level=info msg="App mode production" logger=sqlstore t=2026-05-16T19:57:25.0789605Z level=info msg="Connecting to DB" dbtype=postgres logger=migrator t=2026-05-16T19:57:25.112789618Z level=info msg="Starting DB migrations" logger=migrator t=2026-05-16T19:57:25.117961056Z level=info msg="migrations completed" performed=0 skipped=452 duration=446.26µs logger=secrets t=2026-05-16T19:57:25.119581496Z level=info msg="Envelope encryption state" enabled=true currentprovider=secretKey.v1 logger=plugin.finder t=2026-05-16T19:57:25.154041283Z level=warn msg="Skipping finding plugins as directory does not exist" path=/usr/share/grafana/plugins-bundled logger=plugin.signature.validator t=2026-05-16T19:57:25.266772863Z level=warn msg="Permitting unsigned plugin. 
This is not recommended" pluginID=pmm-pt-summary-datasource pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-pt-summary/datasource logger=plugin.signature.validator t=2026-05-16T19:57:25.266810713Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-pt-summary-panel pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-pt-summary/panel logger=plugin.signature.validator t=2026-05-16T19:57:25.266831933Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-qan-app-panel pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-qan logger=plugin.signature.validator t=2026-05-16T19:57:25.267012493Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=grafana-polystat-panel pluginDir=/srv/grafana/plugins/grafana-polystat-panel logger=plugin.signature.validator t=2026-05-16T19:57:25.267041473Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-app pluginDir=/srv/grafana/plugins/pmm-app/dist logger=plugin.pmm-app t=2026-05-16T19:57:25.27440791Z level=warn msg="Included dashboard is missing a UID field" logger=plugin.loader t=2026-05-16T19:57:25.27466885Z level=info msg="Plugin registered" pluginID=jdbranham-diagram-panel logger=plugin.loader t=2026-05-16T19:57:25.27467976Z level=info msg="Plugin registered" pluginID=petrslavotinek-carpetplot-panel logger=plugin.loader t=2026-05-16T19:57:25.27468413Z level=info msg="Plugin registered" pluginID=pmm-pt-summary-datasource logger=plugin.loader t=2026-05-16T19:57:25.27468965Z level=info msg="Plugin registered" pluginID=pmm-pt-summary-panel logger=plugin.loader t=2026-05-16T19:57:25.2746951Z level=info msg="Plugin registered" pluginID=pmm-qan-app-panel logger=plugin.loader t=2026-05-16T19:57:25.27470071Z level=info msg="Plugin registered" pluginID=camptocamp-prometheus-alertmanager-datasource logger=plugin.loader t=2026-05-16T19:57:25.27470609Z level=info msg="Plugin registered" pluginID=grafana-worldmap-panel logger=plugin.loader t=2026-05-16T19:57:25.27471145Z level=info msg="Plugin registered" pluginID=grafana-clickhouse-datasource logger=plugin.grafana-clickhouse-datasource t=2026-05-16T19:57:25.283851957Z level=warn msg="Plugin process is running with elevated privileges. 
This is not recommended" logger=plugin.loader t=2026-05-16T19:57:25.283868367Z level=info msg="Plugin registered" pluginID=grafana-polystat-panel logger=plugin.loader t=2026-05-16T19:57:25.283876897Z level=info msg="Plugin registered" pluginID=natel-discrete-panel logger=plugin.loader t=2026-05-16T19:57:25.283880617Z level=info msg="Plugin registered" pluginID=pmm-app logger=plugin.loader t=2026-05-16T19:57:25.283884607Z level=info msg="Plugin registered" pluginID=grafana-piechart-panel Admin password changed successfully ✔ + cat /tmp/tmp.cs187BCVxg + rm /tmp/tmp.NZz5XFpUHg /tmp/tmp.cs187BCVxg + return 0 + desc 'create PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PXC cluster ----------------------------------------------------------------------------------- + spinup_pxc monitoring /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/conf/monitoring.yml 3 120 /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/conf/secrets.yaml + local cluster=monitoring + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/conf/monitoring.yml + local size=3 + local sleep=120 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/conf/secrets.yaml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/conf/secrets.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.mVP3gRTJ63 ++ mktemp + local LAST_ERR=/tmp/tmp.pN6go4zJR4 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/conf/secrets.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.mVP3gRTJ63 secret/my-cluster-secrets created + cat /tmp/tmp.pN6go4zJR4 + rm /tmp/tmp.mVP3gRTJ63 /tmp/tmp.pN6go4zJR4 + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/conf/client.yml + local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/conf/client.yml + local pvc_name= + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/conf/client.yml '' + local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/conf/client.yml + local pvc_name= + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' + kubectl_bin apply -f - + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2467-3dc7f023#' + /usr/bin/sed -e 's#image:.*-backup$#image: 
perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' ++ mktemp + /usr/bin/sed -e s~minio-service.#namespace~minio-service.monitoring-2-0-25359~ + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/conf/client.yml + local LAST_OUT=/tmp/tmp.qZv9HipKLl ++ mktemp + local LAST_ERR=/tmp/tmp.TvUpvPzAU2 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.qZv9HipKLl deployment.apps/pxc-client created + cat /tmp/tmp.TvUpvPzAU2 + rm /tmp/tmp.qZv9HipKLl /tmp/tmp.TvUpvPzAU2 + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/conf/monitoring.yml + local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/conf/monitoring.yml + local pvc_name= + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/conf/monitoring.yml '' + local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/conf/monitoring.yml + local pvc_name= + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/conf/monitoring.yml ++ mktemp + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + local LAST_OUT=/tmp/tmp.5YcuHLXWSn + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.monitoring-2-0-25359~ + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' ++ mktemp + local LAST_ERR=/tmp/tmp.RNAYmGxqE0 + local exit_status=0 + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2467-3dc7f023#' ++ seq 0 2 + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.5YcuHLXWSn perconaxtradbcluster.pxc.percona.com/monitoring created + cat /tmp/tmp.RNAYmGxqE0 + rm /tmp/tmp.5YcuHLXWSn /tmp/tmp.RNAYmGxqE0 + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy monitoring ++ local target_cluster=monitoring +++ kubectl_bin get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.WJrDiSFZjc ++++ mktemp +++ local LAST_ERR=/tmp/tmp.rbHJoRN4Nb +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc 
monitoring -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.WJrDiSFZjc +++ cat /tmp/tmp.rbHJoRN4Nb +++ rm /tmp/tmp.WJrDiSFZjc /tmp/tmp.rbHJoRN4Nb +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo monitoring-haproxy ++ return + local proxy=monitoring-haproxy + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-2-0-25359 ++ mktemp + local LAST_OUT=/tmp/tmp.NvY0gLwXr4 ++ mktemp + local LAST_ERR=/tmp/tmp.z2Ya2wKCxB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-2-0-25359 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-2-0-25359 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.NvY0gLwXr4 pod/monitoring-haproxy-0 condition met pod/monitoring-pxc-0 condition met + cat /tmp/tmp.z2Ya2wKCxB E0516 19:58:26.556485 26418 reflector.go:227] "Failed to watch" err="Get \"https://34.56.22.232/api/v1/namespaces/monitoring-2-0-25359/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dmonitoring-pxc-0&resourceVersion=1778961506400815024&timeoutSeconds=494&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" + rm /tmp/tmp.NvY0gLwXr4 /tmp/tmp.z2Ya2wKCxB + return 0 + wait_for_running monitoring-haproxy 1 + local name=monitoring-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-haproxy-0 480 + local pod=monitoring-haproxy-0 + local max_retry=480 + local ns= ++ echo monitoring-haproxy-0 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/monitoring-haproxy-0 condition met E0516 19:58:28.056031 31928 reflector.go:227] "Failed to watch" err="Get \"https://34.56.22.232/api/v1/namespaces/monitoring-2-0-25359/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dmonitoring-haproxy-0&resourceVersion=1778961506541599024&timeoutSeconds=348&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" waiting for pod/monitoring-haproxy-0 to become Ready.Ok + wait_for_running monitoring-pxc 3 + local name=monitoring-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-0 480 + local pod=monitoring-pxc-0 + local max_retry=480 + local ns= ++ echo monitoring-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace 
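Most of the repetition in this log comes from the kubectl_bin helper, whose trace (mktemp for LAST_OUT/LAST_ERR, a seq 0 2 retry loop, cat of both files, rm, return) surrounds nearly every kubectl call above. A rough reconstruction of that wrapper; the redirection into the temp files and the pause between attempts are assumptions, since the trace only shows the files being created and printed:

kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do                        # up to three attempts
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"    # capture stdout/stderr (assumed)
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            sleep 0                                # the trace shows a zero-second pause between attempts
            continue
        fi
        break
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR"
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}
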
pod/monitoring-pxc-0 condition met E0516 19:58:32.818538 32484 reflector.go:227] "Failed to watch" err="Get \"https://34.56.22.232/api/v1/namespaces/monitoring-2-0-25359/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dmonitoring-pxc-0&resourceVersion=1778961511960959024&timeoutSeconds=471&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" waiting for pod/monitoring-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-1 480 + local pod=monitoring-pxc-1 + local max_retry=480 + local ns= ++ echo monitoring-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/monitoring-pxc-1 condition met waiting for pod/monitoring-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-2 480 + local pod=monitoring-pxc-2 + local max_retry=480 + local ns= ++ echo monitoring-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/monitoring-pxc-2 condition met waiting for pod/monitoring-pxc-2 to become Ready.Ok + sleep 120 ++ kubectl get pxc monitoring -o 'jsonpath={.spec.secretsName}' + local secret_name=my-cluster-secrets ++ getSecretData my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.NCVZ4J4iRU +++ mktemp ++ local LAST_ERR=/tmp/tmp.2Y8uPSFI9i ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.NCVZ4J4iRU ++ cat /tmp/tmp.2Y8uPSFI9i ++ rm /tmp/tmp.NCVZ4J4iRU /tmp/tmp.2Y8uPSFI9i ++ return 0 + local root_pass=root_password + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aPl5FaL04m +++ mktemp ++ local LAST_ERR=/tmp/tmp.TKP5Plgtm8 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.aPl5FaL04m ++ cat /tmp/tmp.TKP5Plgtm8 ++ rm /tmp/tmp.aPl5FaL04m /tmp/tmp.TKP5Plgtm8 ++ return 0 + client_pod=pxc-client-67fc4995bb-jpktq + wait_pod pxc-client-67fc4995bb-jpktq + local pod=pxc-client-67fc4995bb-jpktq + local max_retry=480 + local ns= ++ echo pxc-client-67fc4995bb-jpktq ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-67fc4995bb-jpktq condition met E0516 20:03:16.268941 32620 reflector.go:227] "Failed to 
watch" err="Get \"https://34.56.22.232/api/v1/namespaces/monitoring-2-0-25359/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dpxc-client-67fc4995bb-jpktq&resourceVersion=1778961794638760000&timeoutSeconds=565&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" waiting for pod/pxc-client-67fc4995bb-jpktq to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.8rEyiiwUD9 +++ mktemp ++ local LAST_ERR=/tmp/tmp.P97OXhSJG7 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.8rEyiiwUD9 ++ cat /tmp/tmp.P97OXhSJG7 ++ rm /tmp/tmp.8rEyiiwUD9 /tmp/tmp.P97OXhSJG7 ++ return 0 + client_pod=pxc-client-67fc4995bb-jpktq + wait_pod pxc-client-67fc4995bb-jpktq + local pod=pxc-client-67fc4995bb-jpktq + local max_retry=480 + local ns= ++ echo pxc-client-67fc4995bb-jpktq ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-67fc4995bb-jpktq condition met E0516 20:03:26.897371 1651 reflector.go:227] "Failed to watch" err="Get \"https://34.56.22.232/api/v1/namespaces/monitoring-2-0-25359/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dpxc-client-67fc4995bb-jpktq&resourceVersion=1778961804724549000&timeoutSeconds=450&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" waiting for pod/pxc-client-67fc4995bb-jpktq to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.PKeXNv0RxF +++ mktemp ++ local LAST_ERR=/tmp/tmp.ObBCDGrcAc ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 
'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.PKeXNv0RxF ++ cat /tmp/tmp.ObBCDGrcAc ++ rm /tmp/tmp.PKeXNv0RxF /tmp/tmp.ObBCDGrcAc ++ return 0 + client_pod=pxc-client-67fc4995bb-jpktq + wait_pod pxc-client-67fc4995bb-jpktq + local pod=pxc-client-67fc4995bb-jpktq + local max_retry=480 + local ns= ++ grep -E '^(pxc|proxysql)$' ++ echo pxc-client-67fc4995bb-jpktq ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-67fc4995bb-jpktq condition met E0516 20:04:08.793528 7899 reflector.go:227] "Failed to watch" err="Get \"https://34.56.22.232/api/v1/namespaces/monitoring-2-0-25359/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dpxc-client-67fc4995bb-jpktq&resourceVersion=1778961846830348000&timeoutSeconds=506&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" waiting for pod/pxc-client-67fc4995bb-jpktq to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! -s /tmp/tmp.7PFIFwbwjt/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.7PFIFwbwjt/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/select-1.sql /tmp/tmp.7PFIFwbwjt/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.i2eBWNpjcu +++ mktemp ++ local LAST_ERR=/tmp/tmp.W6bAoiYZ0A ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.i2eBWNpjcu ++ cat /tmp/tmp.W6bAoiYZ0A ++ rm /tmp/tmp.i2eBWNpjcu /tmp/tmp.W6bAoiYZ0A ++ return 0 + client_pod=pxc-client-67fc4995bb-jpktq + wait_pod pxc-client-67fc4995bb-jpktq + local pod=pxc-client-67fc4995bb-jpktq + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-67fc4995bb-jpktq ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-67fc4995bb-jpktq condition met E0516 20:04:18.057865 9202 reflector.go:227] "Failed to watch" err="Get 
\"https://34.56.22.232/api/v1/namespaces/monitoring-2-0-25359/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dpxc-client-67fc4995bb-jpktq&resourceVersion=1778961856107132000&timeoutSeconds=554&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" waiting for pod/pxc-client-67fc4995bb-jpktq to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! -s /tmp/tmp.7PFIFwbwjt/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.7PFIFwbwjt/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/select-1.sql /tmp/tmp.7PFIFwbwjt/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.RlhCXHDpa7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.2JO78oLPUd ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.RlhCXHDpa7 ++ cat /tmp/tmp.2JO78oLPUd ++ rm /tmp/tmp.RlhCXHDpa7 /tmp/tmp.2JO78oLPUd ++ return 0 + client_pod=pxc-client-67fc4995bb-jpktq + wait_pod pxc-client-67fc4995bb-jpktq + local pod=pxc-client-67fc4995bb-jpktq + local max_retry=480 + local ns= ++ echo pxc-client-67fc4995bb-jpktq ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-67fc4995bb-jpktq condition met E0516 20:04:27.657318 10474 reflector.go:227] "Failed to watch" err="Get \"https://34.56.22.232/api/v1/namespaces/monitoring-2-0-25359/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dpxc-client-67fc4995bb-jpktq&resourceVersion=1778961865652571000&timeoutSeconds=558&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" waiting for pod/pxc-client-67fc4995bb-jpktq to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.7PFIFwbwjt/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.7PFIFwbwjt/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/select-1.sql /tmp/tmp.7PFIFwbwjt/select-1.sql + is_keyring_plugin_in_use monitoring + local cluster=monitoring + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + kubectl exec monitoring-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' + grep -E -o 'early-plugin-load=keyring_\w+.so' + return 1 + desc 'add PMM API key to secret' + set +o xtrace ----------------------------------------------------------------------------------- add PMM API key to secret ----------------------------------------------------------------------------------- ++ jq .key +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.mkv3KGjQsx +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.GQKtldb7HX ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.mkv3KGjQsx ++++ cat /tmp/tmp.GQKtldb7HX ++++ rm /tmp/tmp.mkv3KGjQsx /tmp/tmp.GQKtldb7HX ++++ return 0 +++ local endpoint= +++ '[' -z '' ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.MgR3VB8FKa +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.50SLZrZm3n ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.MgR3VB8FKa ++++ cat /tmp/tmp.50SLZrZm3n ++++ rm /tmp/tmp.MgR3VB8FKa /tmp/tmp.50SLZrZm3n ++++ return 0 +++ endpoint=34.133.175.203 +++ '[' -n 34.133.175.203 ']' +++ '[' 34.133.175.203 '!=' null ']' +++ echo 34.133.175.203 +++ head -n 1 +++ sed -e 's/^"//; s/"$//;' +++ return ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@34.133.175.203/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 155 100 119 100 36 272 82 --:--:-- --:--:-- --:--:-- 355 + API_KEY='"eyJrIjoia3pxT1BTaUxyUzVTTWVTaVNzb1IxNENxZThSVW9TSWYiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="' + kubectl_bin patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoia3pxT1BTaUxyUzVTTWVTaVNzb1IxNENxZThSVW9TSWYiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' ++ mktemp + local LAST_OUT=/tmp/tmp.ud60jegJ08 ++ mktemp + local LAST_ERR=/tmp/tmp.hyD4YSgKff + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoia3pxT1BTaUxyUzVTTWVTaVNzb1IxNENxZThSVW9TSWYiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ud60jegJ08 secret/my-cluster-secrets patched + cat /tmp/tmp.hyD4YSgKff + rm /tmp/tmp.ud60jegJ08 /tmp/tmp.hyD4YSgKff + return 0 + wait_for_generation sts/monitoring-pxc 2 + local resource=sts/monitoring-pxc + local target_generation=2 + echo 'Waiting for 
sts/monitoring-pxc to reach generation 2...' Waiting for sts/monitoring-pxc to reach generation 2... + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 2 ']' + echo 'Resource sts/monitoring-pxc has reached generation 2.' Resource sts/monitoring-pxc has reached generation 2. + break + wait_for_generation sts/monitoring-haproxy 2 + local resource=sts/monitoring-haproxy + local target_generation=2 + echo 'Waiting for sts/monitoring-haproxy to reach generation 2...' Waiting for sts/monitoring-haproxy to reach generation 2... + true ++ kubectl get sts/monitoring-haproxy -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 2 ']' + echo 'Resource sts/monitoring-haproxy has reached generation 2.' Resource sts/monitoring-haproxy has reached generation 2. + break + sleep 10 + kubectl wait pod -l app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --for=condition=ready --timeout=600s E0516 20:04:57.153065 14468 reflector.go:227] "Failed to watch" err="Get \"https://34.56.22.232/api/v1/namespaces/monitoring-2-0-25359/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dmonitoring-haproxy-0&resourceVersion=1778961896428543000&timeoutSeconds=432&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" pod/monitoring-haproxy-0 condition met pod/monitoring-haproxy-1 condition met E0516 20:05:09.072380 14468 reflector.go:227] "Failed to watch" err="Get \"https://34.56.22.232/api/v1/namespaces/monitoring-2-0-25359/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dmonitoring-pxc-0&resourceVersion=1778961908765631018&timeoutSeconds=355&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" pod/monitoring-pxc-0 condition met E0516 20:05:09.374468 14468 reflector.go:227] "Failed to watch" err="Get \"https://34.56.22.232/api/v1/namespaces/monitoring-2-0-25359/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dmonitoring-pxc-1&resourceVersion=1778961908765631018&timeoutSeconds=379&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" pod/monitoring-pxc-1 condition met pod/monitoring-pxc-2 condition met + wait_cluster_consistency monitoring 3 2 + local cluster_name=monitoring + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/monitoring to be ready' waiting for pxc/monitoring to be ready++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mFklQW6uIV +++ mktemp ++ local LAST_ERR=/tmp/tmp.mqNnIjeKHw ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.mFklQW6uIV ++ cat /tmp/tmp.mqNnIjeKHw ++ rm /tmp/tmp.mFklQW6uIV /tmp/tmp.mqNnIjeKHw ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
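The consistency check running at this point simply polls the custom resource's status until it reports ready. A minimal standalone sketch of that poll, assuming the same kubectl context and namespace (the function name is illustrative, not the harness's own helper, which also verifies .status.pxc.ready and .status.haproxy.ready afterwards):

    # Poll pxc/<cluster> .status.state every 5s until it becomes "ready"
    # (the harness caps this at roughly 300 iterations before giving up).
    wait_pxc_ready() {
        local cluster="$1" max="${2:-300}" i=0 state
        while true; do
            state=$(kubectl get pxc "$cluster" -o 'jsonpath={.status.state}')
            [ "$state" = "ready" ] && break
            [ "$i" -ge "$max" ] && { echo "timeout waiting for pxc/$cluster" >&2; return 1; }
            echo -n .
            sleep 5
            i=$((i + 1))
        done
        echo "pxc/$cluster is ready"
    }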
.+ sleep 5 + [[ 0 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.i2fmMWBMvT +++ mktemp ++ local LAST_ERR=/tmp/tmp.aq5C3zOlEx ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.i2fmMWBMvT ++ cat /tmp/tmp.aq5C3zOlEx ++ rm /tmp/tmp.i2fmMWBMvT /tmp/tmp.aq5C3zOlEx ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 1 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.X96WtIf55V +++ mktemp ++ local LAST_ERR=/tmp/tmp.f5GcVAw9lL ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.X96WtIf55V ++ cat /tmp/tmp.f5GcVAw9lL ++ rm /tmp/tmp.X96WtIf55V /tmp/tmp.f5GcVAw9lL ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 2 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.bu1G9oMPOM +++ mktemp ++ local LAST_ERR=/tmp/tmp.TcDkgw277X ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.bu1G9oMPOM ++ cat /tmp/tmp.TcDkgw277X ++ rm /tmp/tmp.bu1G9oMPOM /tmp/tmp.TcDkgw277X ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 3 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.24JwblJbfa +++ mktemp ++ local LAST_ERR=/tmp/tmp.HVYWVy6pPJ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.24JwblJbfa ++ cat /tmp/tmp.HVYWVy6pPJ ++ rm /tmp/tmp.24JwblJbfa /tmp/tmp.HVYWVy6pPJ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 4 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pLaNBBNoT7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.yaGYgczbld ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.pLaNBBNoT7 ++ cat /tmp/tmp.yaGYgczbld ++ rm /tmp/tmp.pLaNBBNoT7 /tmp/tmp.yaGYgczbld ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 5 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dxhHMbSGz7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.wcXsZfGkli ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.dxhHMbSGz7 ++ cat /tmp/tmp.wcXsZfGkli ++ rm /tmp/tmp.dxhHMbSGz7 /tmp/tmp.wcXsZfGkli ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
.+ sleep 5 + [[ 6 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.BS79TADCGW +++ mktemp ++ local LAST_ERR=/tmp/tmp.GWCcD6ElE6 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.BS79TADCGW ++ cat /tmp/tmp.GWCcD6ElE6 ++ rm /tmp/tmp.BS79TADCGW /tmp/tmp.GWCcD6ElE6 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 7 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.K3HScF5Fio +++ mktemp ++ local LAST_ERR=/tmp/tmp.4RjV9795RE ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.K3HScF5Fio ++ cat /tmp/tmp.4RjV9795RE ++ rm /tmp/tmp.K3HScF5Fio /tmp/tmp.4RjV9795RE ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 8 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.R2DPhworrK +++ mktemp ++ local LAST_ERR=/tmp/tmp.mcU2AF2sDc ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.R2DPhworrK ++ cat /tmp/tmp.mcU2AF2sDc ++ rm /tmp/tmp.R2DPhworrK /tmp/tmp.mcU2AF2sDc ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 9 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dFw86KfLwU +++ mktemp ++ local LAST_ERR=/tmp/tmp.eIQSrf29OF ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.dFw86KfLwU ++ cat /tmp/tmp.eIQSrf29OF ++ rm /tmp/tmp.dFw86KfLwU /tmp/tmp.eIQSrf29OF ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 10 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0VaAc860Li +++ mktemp ++ local LAST_ERR=/tmp/tmp.bQ0qQqaJsK ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.0VaAc860Li ++ cat /tmp/tmp.bQ0qQqaJsK ++ rm /tmp/tmp.0VaAc860Li /tmp/tmp.bQ0qQqaJsK ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 11 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tvG6GzBQ4Y +++ mktemp ++ local LAST_ERR=/tmp/tmp.w9JrMTbKCW ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.tvG6GzBQ4Y ++ cat /tmp/tmp.w9JrMTbKCW ++ rm /tmp/tmp.tvG6GzBQ4Y /tmp/tmp.w9JrMTbKCW ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
.+ sleep 5 + [[ 12 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.tVogTuEPZX +++ mktemp ++ local LAST_ERR=/tmp/tmp.FGNef0zbVz ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.tVogTuEPZX ++ cat /tmp/tmp.FGNef0zbVz ++ rm /tmp/tmp.tVogTuEPZX /tmp/tmp.FGNef0zbVz ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 13 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZHrFe9FQK3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.5sdHJd71xA ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ZHrFe9FQK3 ++ cat /tmp/tmp.5sdHJd71xA ++ rm /tmp/tmp.ZHrFe9FQK3 /tmp/tmp.5sdHJd71xA ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 14 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LmruQEGQ4m +++ mktemp ++ local LAST_ERR=/tmp/tmp.TVnHQCYrFK ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.LmruQEGQ4m ++ cat /tmp/tmp.TVnHQCYrFK ++ rm /tmp/tmp.LmruQEGQ4m /tmp/tmp.TVnHQCYrFK ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ni9naRRezY +++ mktemp ++ local LAST_ERR=/tmp/tmp.ng2JFOz50w ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ni9naRRezY ++ cat /tmp/tmp.ng2JFOz50w ++ rm /tmp/tmp.ni9naRRezY /tmp/tmp.ng2JFOz50w ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine monitoring +++ local cluster_name=monitoring ++++ get_proxy monitoring ++++ local target_cluster=monitoring +++++ kubectl_bin get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.PstdH4zSnL ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.ygNoQkHe98 +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.PstdH4zSnL +++++ cat /tmp/tmp.ygNoQkHe98 +++++ rm /tmp/tmp.PstdH4zSnL /tmp/tmp.ygNoQkHe98 +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo monitoring-haproxy ++++ return +++ local cluster_proxy=monitoring-haproxy +++ echo haproxy ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mkvavOtig3 +++ mktemp ++ local LAST_ERR=/tmp/tmp.HPh7UqdmQs ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.mkvavOtig3 ++ cat /tmp/tmp.HPh7UqdmQs ++ rm /tmp/tmp.mkvavOtig3 /tmp/tmp.HPh7UqdmQs ++ return 0 + [[ 2 == \2 ]] + echo + compare_kubectl statefulset/monitoring-pxc -no-prefix + local resource=statefulset/monitoring-pxc + local postfix=-no-prefix + local 
expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix.yml + local new_result=/tmp/tmp.7PFIFwbwjt/statefulset_monitoring-pxc.yml + desc 'compare statefulset/monitoring-pxc--no-prefix' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-pxc--no-prefix ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 ++ echo '1.33 >= 1.33' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k133.yml ']' + version_gt 1.29 ++ bc -l ++ echo '1.33 >= 1.29' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k129.yml ']' + version_gt 1.27 ++ echo '1.33 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k127-oc.yml ']' + version_gt 1.29 ++ bc -l ++ echo '1.33 >= 1.29' + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k127-aks.yml ']' + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") | del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. 
| select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | del(.spec.updateStrategy.rollingUpdate.maxUnavailable) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-25359", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + kubectl_bin get -o yaml statefulset/monitoring-pxc ++ mktemp + local LAST_OUT=/tmp/tmp.IR3CxHZlZl ++ mktemp + local LAST_ERR=/tmp/tmp.LKmfx1Ldob + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.IR3CxHZlZl + cat /tmp/tmp.LKmfx1Ldob + rm /tmp/tmp.IR3CxHZlZl /tmp/tmp.LKmfx1Ldob + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k127.yml /tmp/tmp.7PFIFwbwjt/statefulset_monitoring-pxc.yml + log 'compare_kubectl: statefulset/monitoring-pxc OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2026-05-16T20:07:42+0000]' compare_kubectl: statefulset/monitoring-pxc OK [2026-05-16T20:07:42+0000] compare_kubectl: statefulset/monitoring-pxc OK + compare_kubectl statefulset/monitoring-haproxy -no-prefix + local resource=statefulset/monitoring-haproxy + local postfix=-no-prefix + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix.yml + local new_result=/tmp/tmp.7PFIFwbwjt/statefulset_monitoring-haproxy.yml + desc 'compare statefulset/monitoring-haproxy--no-prefix' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-haproxy--no-prefix ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 ++ echo '1.33 >= 1.33' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k133.yml ']' + version_gt 1.29 ++ echo '1.33 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k129.yml ']' + version_gt 1.27 ++ bc -l ++ echo '1.33 >= 1.27' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k127.yml + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k127-oc.yml ']' + version_gt 1.29 ++ echo '1.33 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-haproxy ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") | del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. 
| select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | del(.spec.updateStrategy.rollingUpdate.maxUnavailable) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-25359", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.dSAFW8Jevh ++ mktemp + local LAST_ERR=/tmp/tmp.GESiHm9B9B + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.dSAFW8Jevh + cat /tmp/tmp.GESiHm9B9B + rm /tmp/tmp.dSAFW8Jevh /tmp/tmp.GESiHm9B9B + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k127.yml /tmp/tmp.7PFIFwbwjt/statefulset_monitoring-haproxy.yml + log 'compare_kubectl: statefulset/monitoring-haproxy OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2026-05-16T20:07:44+0000]' compare_kubectl: statefulset/monitoring-haproxy OK [2026-05-16T20:07:44+0000] compare_kubectl: statefulset/monitoring-haproxy OK + desc 'apply my-env-var-secrets to add PMM_PREFIX' + set +o xtrace ----------------------------------------------------------------------------------- apply my-env-var-secrets to add PMM_PREFIX ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/conf/envsecrets.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.DitFoEVObv ++ mktemp + local LAST_ERR=/tmp/tmp.PPcOTv6BWu + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/conf/envsecrets.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.DitFoEVObv secret/my-env-var-secrets created + cat /tmp/tmp.PPcOTv6BWu + rm /tmp/tmp.DitFoEVObv /tmp/tmp.PPcOTv6BWu + return 0 + wait_for_generation sts/monitoring-pxc 3 + local resource=sts/monitoring-pxc + local target_generation=3 + echo 'Waiting for sts/monitoring-pxc to reach generation 3...' Waiting for sts/monitoring-pxc to reach generation 3... 
+ true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 3 ']' + echo 'Resource sts/monitoring-pxc has reached generation 3.' Resource sts/monitoring-pxc has reached generation 3. + break + wait_for_generation sts/monitoring-haproxy 3 + local resource=sts/monitoring-haproxy + local target_generation=3 + echo 'Waiting for sts/monitoring-haproxy to reach generation 3...' Waiting for sts/monitoring-haproxy to reach generation 3... + true ++ kubectl get sts/monitoring-haproxy -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 3 ']' + echo 'Resource sts/monitoring-haproxy has reached generation 3.' Resource sts/monitoring-haproxy has reached generation 3. + break + desc 'add new PMM API key to secret' + set +o xtrace ----------------------------------------------------------------------------------- add new PMM API key to secret ----------------------------------------------------------------------------------- ++ jq .key +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.7AeibwuR6v +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.537S6Z8G6N ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.7AeibwuR6v ++++ cat /tmp/tmp.537S6Z8G6N ++++ rm /tmp/tmp.7AeibwuR6v /tmp/tmp.537S6Z8G6N ++++ return 0 +++ local endpoint= +++ '[' -z '' ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.VYo5nnDEog +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.nyCCETunzY ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.VYo5nnDEog ++++ cat /tmp/tmp.nyCCETunzY ++++ rm /tmp/tmp.VYo5nnDEog /tmp/tmp.nyCCETunzY ++++ return 0 +++ endpoint=34.133.175.203 +++ '[' -n 34.133.175.203 ']' +++ '[' 34.133.175.203 '!=' null ']' +++ head -n 1 +++ echo 34.133.175.203 +++ sed -e 's/^"//; s/"$//;' +++ return ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator-new", "role": "Admin"}' https://admin:admin@34.133.175.203/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 167 100 127 100 40 288 90 --:--:-- --:--:-- --:--:-- 379 + API_KEY_NEW='"eyJrIjoia1g0cGdrUE9GWlVBOGJBb2wzTzVvTTV5SjNvWWlqOXgiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9"' + kubectl_bin patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoia1g0cGdrUE9GWlVBOGJBb2wzTzVvTTV5SjNvWWlqOXgiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9"}}' ++ mktemp + local LAST_OUT=/tmp/tmp.NibcuMFwQK ++ mktemp + local LAST_ERR=/tmp/tmp.JFFDHursuR + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' 
+ set +e + kubectl patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoia1g0cGdrUE9GWlVBOGJBb2wzTzVvTTV5SjNvWWlqOXgiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9"}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.NibcuMFwQK secret/my-cluster-secrets patched + cat /tmp/tmp.JFFDHursuR + rm /tmp/tmp.NibcuMFwQK /tmp/tmp.JFFDHursuR + return 0 + desc 'delete old PMM key' + set +o xtrace ----------------------------------------------------------------------------------- delete old PMM key ----------------------------------------------------------------------------------- ++ jq '.[] | select( .name == "operator").id' +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.xliKkJVm2y +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Gsp1ABn3x2 ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.xliKkJVm2y ++++ cat /tmp/tmp.Gsp1ABn3x2 ++++ rm /tmp/tmp.xliKkJVm2y /tmp/tmp.Gsp1ABn3x2 ++++ return 0 +++ local endpoint= +++ '[' -z '' ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.L56ZohF79T +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.2U9giv7qNC ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.L56ZohF79T ++++ cat /tmp/tmp.2U9giv7qNC ++++ rm /tmp/tmp.L56ZohF79T /tmp/tmp.2U9giv7qNC ++++ return 0 +++ endpoint=34.133.175.203 +++ '[' -n 34.133.175.203 ']' +++ '[' 34.133.175.203 '!=' null ']' +++ echo 34.133.175.203 +++ head -n 1 +++ sed -e 's/^"//; s/"$//;' +++ return ++ curl --insecure -X GET https://admin:admin@34.133.175.203/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 602 100 602 0 0 1401 0 --:--:-- --:--:-- --:--:-- 1403 + ID_API_KEY_OLD=6 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.bog2k9dZgq ++++ mktemp +++ local LAST_ERR=/tmp/tmp.vZdzKhQMGy +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.bog2k9dZgq +++ cat /tmp/tmp.vZdzKhQMGy +++ rm /tmp/tmp.bog2k9dZgq /tmp/tmp.vZdzKhQMGy +++ return 0 ++ local endpoint= ++ '[' -z '' ']' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.gUoOw4N5hS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.DEChl8yYEw +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.gUoOw4N5hS +++ cat /tmp/tmp.DEChl8yYEw +++ 
rm /tmp/tmp.gUoOw4N5hS /tmp/tmp.DEChl8yYEw +++ return 0 ++ endpoint=34.133.175.203 ++ '[' -n 34.133.175.203 ']' ++ '[' 34.133.175.203 '!=' null ']' ++ sed -e 's/^"//; s/"$//;' ++ head -n 1 ++ echo 34.133.175.203 ++ return + curl --insecure -X DELETE https://admin:admin@34.133.175.203/graph/api/auth/keys/6 % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 29 100 29 0 0 60 0 --:--:-- --:--:-- --:--:-- 60 {"message":"API key deleted"}+ wait_for_generation sts/monitoring-pxc 4 + local resource=sts/monitoring-pxc + local target_generation=4 + echo 'Waiting for sts/monitoring-pxc to reach generation 4...' Waiting for sts/monitoring-pxc to reach generation 4... + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' 
Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=4 + '[' 4 -eq 4 ']' + echo 'Resource sts/monitoring-pxc has reached generation 4.' Resource sts/monitoring-pxc has reached generation 4. + break + wait_for_generation sts/monitoring-haproxy 4 + local resource=sts/monitoring-haproxy + local target_generation=4 + echo 'Waiting for sts/monitoring-haproxy to reach generation 4...' Waiting for sts/monitoring-haproxy to reach generation 4... 
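The API key rotation performed above talks to the PMM (Grafana) HTTP API directly: create a new Admin key, store it in the pmmserverkey field of my-cluster-secrets, then look up and delete the previous "operator" key. Condensed into plain curl/jq/kubectl, assuming ENDPOINT holds the monitoring-service LoadBalancer IP and the default admin:admin credentials used in this test environment:

    ENDPOINT=34.133.175.203   # from get_service_endpoint monitoring-service
    # 1) create a new API key and push it into the cluster secret
    NEW_KEY=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
        -d '{"name":"operator-new", "role": "Admin"}' \
        "https://admin:admin@${ENDPOINT}/graph/api/auth/keys" | jq -r .key)
    kubectl patch secret my-cluster-secrets --type merge \
        --patch "{\"stringData\": {\"pmmserverkey\": \"${NEW_KEY}\"}}"
    # 2) find the old "operator" key and delete it so only the rotated key stays valid
    OLD_ID=$(curl --insecure -s -X GET "https://admin:admin@${ENDPOINT}/graph/api/auth/keys" \
        | jq '.[] | select(.name == "operator").id')
    curl --insecure -s -X DELETE "https://admin:admin@${ENDPOINT}/graph/api/auth/keys/${OLD_ID}"

Updating pmmserverkey changes the pod spec hash, which is why the StatefulSets roll to generation 4 here.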
+ true ++ kubectl get sts/monitoring-haproxy -o 'jsonpath={.metadata.generation}' + current_generation=4 + '[' 4 -eq 4 ']' + echo 'Resource sts/monitoring-haproxy has reached generation 4.' Resource sts/monitoring-haproxy has reached generation 4. + break + sleep 10 + kubectl wait pod -l app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --for=condition=ready --timeout=600s E0516 20:10:53.814401 31263 reflector.go:227] "Failed to watch" err="Get \"https://34.56.22.232/api/v1/namespaces/monitoring-2-0-25359/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dmonitoring-haproxy-0&resourceVersion=1778962253117776000&timeoutSeconds=572&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" pod/monitoring-haproxy-0 condition met pod/monitoring-haproxy-1 condition met E0516 20:11:06.722346 31263 reflector.go:227] "Failed to watch" err="Get \"https://34.56.22.232/api/v1/namespaces/monitoring-2-0-25359/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dmonitoring-pxc-0&resourceVersion=1778962266416143018&timeoutSeconds=362&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" pod/monitoring-pxc-0 condition met E0516 20:11:07.023998 31263 reflector.go:227] "Failed to watch" err="Get \"https://34.56.22.232/api/v1/namespaces/monitoring-2-0-25359/pods?allowWatchBookmarks=true&fieldSelector=metadata.name%3Dmonitoring-pxc-1&resourceVersion=1778962266416143018&timeoutSeconds=526&watch=true\": context canceled" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" type="*unstructured.Unstructured" pod/monitoring-pxc-1 condition met pod/monitoring-pxc-2 condition met + desc 'check if pmm-client container enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-pxc + local resource=statefulset/monitoring-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc.yml + local new_result=/tmp/tmp.7PFIFwbwjt/statefulset_monitoring-pxc.yml + desc 'compare statefulset/monitoring-pxc-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-pxc- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 ++ bc -l ++ echo '1.33 >= 1.33' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k133.yml ']' + version_gt 1.29 ++ echo '1.33 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k129.yml ']' + version_gt 1.27 ++ echo '1.33 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k127-oc.yml ']' + version_gt 1.29 ++ bc -l ++ echo '1.33 >= 1.29' + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-pxc + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") | del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. 
| select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | del(.spec.updateStrategy.rollingUpdate.maxUnavailable) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-25359", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.oZN7xXO1f4 ++ mktemp + local LAST_ERR=/tmp/tmp.NtUUW3kkP2 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.oZN7xXO1f4 + cat /tmp/tmp.NtUUW3kkP2 + rm /tmp/tmp.oZN7xXO1f4 /tmp/tmp.NtUUW3kkP2 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k127.yml /tmp/tmp.7PFIFwbwjt/statefulset_monitoring-pxc.yml + log 'compare_kubectl: statefulset/monitoring-pxc OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2026-05-16T20:11:37+0000]' compare_kubectl: statefulset/monitoring-pxc OK [2026-05-16T20:11:37+0000] compare_kubectl: statefulset/monitoring-pxc OK + compare_kubectl statefulset/monitoring-haproxy + local resource=statefulset/monitoring-haproxy + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy.yml + local new_result=/tmp/tmp.7PFIFwbwjt/statefulset_monitoring-haproxy.yml + desc 'compare statefulset/monitoring-haproxy-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-haproxy- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 ++ bc -l ++ echo 
'1.33 >= 1.33' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k133.yml ']' + version_gt 1.29 ++ echo '1.33 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k129.yml ']' + version_gt 1.27 ++ echo '1.33 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k127-oc.yml ']' + version_gt 1.29 ++ echo '1.33 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-haproxy + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") | del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. 
| select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | del(.spec.updateStrategy.rollingUpdate.maxUnavailable) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-25359", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. 
| select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.MMLs9WvgA1 ++ mktemp + local LAST_ERR=/tmp/tmp.vXBLyFJsCI + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.MMLs9WvgA1 + cat /tmp/tmp.vXBLyFJsCI + rm /tmp/tmp.MMLs9WvgA1 /tmp/tmp.vXBLyFJsCI + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k127.yml /tmp/tmp.7PFIFwbwjt/statefulset_monitoring-haproxy.yml + log 'compare_kubectl: statefulset/monitoring-haproxy OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2026-05-16T20:11:39+0000]' compare_kubectl: statefulset/monitoring-haproxy OK [2026-05-16T20:11:39+0000] compare_kubectl: statefulset/monitoring-haproxy OK + desc 'verify clients agents statuses' + set +o xtrace ----------------------------------------------------------------------------------- verify clients agents statuses ----------------------------------------------------------------------------------- + sleep 300 ++ getSecretData my-cluster-secrets pmmserverkey ++ local secretName=my-cluster-secrets ++ local dataKey=pmmserverkey ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.pmmserverkey}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.G6u7KLlv5l +++ mktemp ++ local LAST_ERR=/tmp/tmp.Tlkh1BPLHp ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.pmmserverkey}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.G6u7KLlv5l ++ cat /tmp/tmp.Tlkh1BPLHp ++ rm /tmp/tmp.G6u7KLlv5l /tmp/tmp.Tlkh1BPLHp ++ return 0 + API_KEY=eyJrIjoia1g0cGdrUE9GWlVBOGJBb2wzTzVvTTV5SjNvWWlqOXgiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.dXiVwrlCCD +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.noL97rjv5n ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.dXiVwrlCCD ++++ cat /tmp/tmp.noL97rjv5n ++++ rm /tmp/tmp.dXiVwrlCCD /tmp/tmp.noL97rjv5n ++++ return 0 +++ local endpoint= +++ '[' -z '' ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.OOdah3b3nm +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.DXcEiQEoUj ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.OOdah3b3nm ++++ cat /tmp/tmp.DXcEiQEoUj ++++ rm /tmp/tmp.OOdah3b3nm /tmp/tmp.DXcEiQEoUj ++++ return 0 +++ endpoint=34.133.175.203 +++ '[' -n 34.133.175.203 ']' +++ '[' 34.133.175.203 '!=' null ']' +++ head -n 1 +++ sed -e 's/^"//; s/"$//;' +++ echo 34.133.175.203 +++ return ++ get_mgmnt_service_list eyJrIjoia1g0cGdrUE9GWlVBOGJBb2wzTzVvTTV5SjNvWWlqOXgiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 34.133.175.203 monitoring-2-0-25359 ++ local 
api_key=eyJrIjoia1g0cGdrUE9GWlVBOGJBb2wzTzVvTTV5SjNvWWlqOXgiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 ++ local endpoint=34.133.175.203 ++ local namespace=monitoring-2-0-25359 ++ jq 'walk(if type=="object" then with_entries(select(.key | test("service_id|node_id|agent_id|created_at|updated_at") | not)) else . end)' ++ jq 'walk(if type == "array" then sort_by(.agent_type) else . end)' ++ curl -s -k -H 'Authorization: Bearer eyJrIjoia1g0cGdrUE9GWlVBOGJBb2wzTzVvTTV5SjNvWWlqOXgiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9' -X POST https://34.133.175.203/v1/management/Service/List ++ /usr/bin/sed -i s/monitoring-2-0-25359-//g /tmp/tmp.7PFIFwbwjt/active_pmm_agents.json ++ jq '.services | sort_by(.node_name)' ++ cat /tmp/tmp.7PFIFwbwjt/active_pmm_agents.json ++ echo /tmp/tmp.7PFIFwbwjt/active_pmm_agents_sorted.json + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2467/e2e-tests/monitoring-2-0/compare/agents-list.json /tmp/tmp.7PFIFwbwjt/active_pmm_agents_sorted.json + desc 'check mysql metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mysql metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds pxc-prefix-monitoring-2-0-25359-monitoring-pxc-0 admin:admin + local metric=node_boot_time_seconds + local instance=pxc-prefix-monitoring-2-0-25359-monitoring-pxc-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1778962547 ++ /usr/bin/date -u +%s + local end=1778962607 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ATn4v1epQQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Z5SlE8gHte +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.ATn4v1epQQ +++ cat /tmp/tmp.Z5SlE8gHte +++ rm /tmp/tmp.ATn4v1epQQ /tmp/tmp.Z5SlE8gHte +++ return 0 ++ local endpoint= ++ '[' -z '' ']' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.AP1TR3txvV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.92Dd5JtHqM +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.AP1TR3txvV +++ cat /tmp/tmp.92Dd5JtHqM +++ rm /tmp/tmp.AP1TR3txvV /tmp/tmp.92Dd5JtHqM +++ return 0 ++ endpoint=34.133.175.203 ++ '[' -n 34.133.175.203 ']' ++ '[' 34.133.175.203 '!=' null ']' ++ head -n 1 ++ echo 34.133.175.203 ++ sed -e 's/^"//; s/"$//;' ++ return + local endpoint=34.133.175.203 ++ curl -s -k 'https://admin:admin@34.133.175.203/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-25359-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-25359-monitoring-pxc-0%22%7D%29&start=1778962547&end=1778962607&step=60' ++ jq '.data.result[0]' + local 'result={ "metric": {}, "values": [ [ 1778962547, "1778954924" ], [ 1778962607, "1778954924" ] ] }' + '[' '{ "metric": {}, "values": [ [ 1778962547, "1778954924" ], [ 1778962607, "1778954924" ] ] }' = null 
']' + jq '.values[][1]' + echo -n '{ "metric": {}, "values": [ [ 1778962547, "1778954924" ], [ 1778962607, "1778954924" ] ] }' + grep '^"[0-9]' "1778954924" "1778954924" + get_metric_values mysql_global_status_uptime pxc-prefix-monitoring-2-0-25359-monitoring-pxc-0 admin:admin + local metric=mysql_global_status_uptime + local instance=pxc-prefix-monitoring-2-0-25359-monitoring-pxc-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1778962551 ++ /usr/bin/date -u +%s + local end=1778962611 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UQYD069T7c ++++ mktemp +++ local LAST_ERR=/tmp/tmp.EVPXlAAODO +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.UQYD069T7c +++ cat /tmp/tmp.EVPXlAAODO +++ rm /tmp/tmp.UQYD069T7c /tmp/tmp.EVPXlAAODO +++ return 0 ++ local endpoint= ++ '[' -z '' ']' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.E7Zlj3EvW6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.McOD1s7JH9 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.E7Zlj3EvW6 +++ cat /tmp/tmp.McOD1s7JH9 +++ rm /tmp/tmp.E7Zlj3EvW6 /tmp/tmp.McOD1s7JH9 +++ return 0 ++ endpoint=34.133.175.203 ++ '[' -n 34.133.175.203 ']' ++ '[' 34.133.175.203 '!=' null ']' ++ head -n 1 ++ sed -e 's/^"//; s/"$//;' ++ echo 34.133.175.203 ++ return + local endpoint=34.133.175.203 ++ jq '.data.result[0]' ++ curl -s -k 'https://admin:admin@34.133.175.203/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-25359-monitoring-pxc-0%22%7d%20or%20mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-25359-monitoring-pxc-0%22%7D%29&start=1778962551&end=1778962611&step=60' + local 'result={ "metric": {}, "values": [ [ 1778962551, "164" ], [ 1778962611, "224" ] ] }' + '[' '{ "metric": {}, "values": [ [ 1778962551, "164" ], [ 1778962611, "224" ] ] }' = null ']' + jq '.values[][1]' + echo -n '{ "metric": {}, "values": [ [ 1778962551, "164" ], [ 1778962611, "224" ] ] }' + grep '^"[0-9]' "164" "224" + desc 'check haproxy metrics' + set +o xtrace ----------------------------------------------------------------------------------- check haproxy metrics ----------------------------------------------------------------------------------- + get_metric_values haproxy_backend_status pxc-prefix-monitoring-2-0-25359-monitoring-haproxy-0 admin:admin + local metric=haproxy_backend_status + local instance=pxc-prefix-monitoring-2-0-25359-monitoring-haproxy-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1778962554 ++ /usr/bin/date -u +%s + local end=1778962614 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.UZJ2c8H5Zs ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MbennVV5Kv +++ local 
exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.UZJ2c8H5Zs +++ cat /tmp/tmp.MbennVV5Kv +++ rm /tmp/tmp.UZJ2c8H5Zs /tmp/tmp.MbennVV5Kv +++ return 0 ++ local endpoint= ++ '[' -z '' ']' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.1Of6afOuuO ++++ mktemp +++ local LAST_ERR=/tmp/tmp.79Xj1epnE3 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.1Of6afOuuO +++ cat /tmp/tmp.79Xj1epnE3 +++ rm /tmp/tmp.1Of6afOuuO /tmp/tmp.79Xj1epnE3 +++ return 0 ++ endpoint=34.133.175.203 ++ '[' -n 34.133.175.203 ']' ++ '[' 34.133.175.203 '!=' null ']' ++ head -n 1 ++ echo 34.133.175.203 ++ sed -e 's/^"//; s/"$//;' ++ return + local endpoint=34.133.175.203 ++ curl -s -k 'https://admin:admin@34.133.175.203/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28haproxy_backend_status%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-25359-monitoring-haproxy-0%22%7d%20or%20haproxy_backend_status%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-25359-monitoring-haproxy-0%22%7D%29&start=1778962554&end=1778962614&step=60' ++ jq '.data.result[0]' + local 'result={ "metric": {}, "values": [ [ 1778962554, "0" ], [ 1778962614, "0" ] ] }' + '[' '{ "metric": {}, "values": [ [ 1778962554, "0" ], [ 1778962614, "0" ] ] }' = null ']' + grep '^"[0-9]' + echo -n '{ "metric": {}, "values": [ [ 1778962554, "0" ], [ 1778962614, "0" ] ] }' + jq '.values[][1]' "0" "0" + get_metric_values haproxy_backend_active_servers pxc-prefix-monitoring-2-0-25359-monitoring-haproxy-0 admin:admin + local metric=haproxy_backend_active_servers + local instance=pxc-prefix-monitoring-2-0-25359-monitoring-haproxy-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1778962556 ++ /usr/bin/date -u +%s + local end=1778962616 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.uwx2aU96mQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Mz1iE4pgui +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.uwx2aU96mQ +++ cat /tmp/tmp.Mz1iE4pgui +++ rm /tmp/tmp.uwx2aU96mQ /tmp/tmp.Mz1iE4pgui +++ return 0 ++ local endpoint= ++ '[' -z '' ']' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Vwof88Uo9f ++++ mktemp +++ local LAST_ERR=/tmp/tmp.sth8SsJxcj +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.Vwof88Uo9f +++ cat /tmp/tmp.sth8SsJxcj +++ rm /tmp/tmp.Vwof88Uo9f /tmp/tmp.sth8SsJxcj +++ return 0 ++ endpoint=34.133.175.203 ++ '[' -n 34.133.175.203 ']' ++ '[' 34.133.175.203 '!=' null ']' ++ head -n 1 ++ echo 34.133.175.203 ++ sed -e 's/^"//; 
s/"$//;' ++ return + local endpoint=34.133.175.203 ++ curl -s -k 'https://admin:admin@34.133.175.203/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28haproxy_backend_active_servers%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-25359-monitoring-haproxy-0%22%7d%20or%20haproxy_backend_active_servers%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-25359-monitoring-haproxy-0%22%7D%29&start=1778962556&end=1778962616&step=60' ++ jq '.data.result[0]' + local 'result={ "metric": {}, "values": [ [ 1778962556, "1" ], [ 1778962616, "1" ] ] }' + '[' '{ "metric": {}, "values": [ [ 1778962556, "1" ], [ 1778962616, "1" ] ] }' = null ']' + grep '^"[0-9]' + jq '.values[][1]' + echo -n '{ "metric": {}, "values": [ [ 1778962556, "1" ], [ 1778962616, "1" ] ] }' "1" "1" + desc 'check QAN data' + set +o xtrace ----------------------------------------------------------------------------------- check QAN data ----------------------------------------------------------------------------------- + get_qan20_values monitoring-pxc-0 admin:admin + local instance=monitoring-pxc-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S -d '-30 minute' + local start=2026-05-16T19:46:59 ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S + local end=2026-05-16T20:16:59 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.pk7SR9fEc1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UjhpEAHZ1K +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.pk7SR9fEc1 +++ cat /tmp/tmp.UjhpEAHZ1K +++ rm /tmp/tmp.pk7SR9fEc1 /tmp/tmp.UjhpEAHZ1K +++ return 0 ++ local endpoint= ++ '[' -z '' ']' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.z5Xi2CmdyB ++++ mktemp +++ local LAST_ERR=/tmp/tmp.P1QD8ho9sY +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.z5Xi2CmdyB +++ cat /tmp/tmp.P1QD8ho9sY +++ rm /tmp/tmp.z5Xi2CmdyB /tmp/tmp.P1QD8ho9sY +++ return 0 ++ endpoint=34.133.175.203 ++ '[' -n 34.133.175.203 ']' ++ '[' 34.133.175.203 '!=' null ']' ++ sed -e 's/^"//; s/"$//;' ++ head -n 1 ++ echo 34.133.175.203 ++ return + local endpoint=34.133.175.203 + cat ++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z + curl -s -k -XPOST -d @payload.json https://admin:admin@34.133.175.203/v0/qan/GetReport + jq '.rows[].fingerprint' null + rm -f payload.json + nodeList=($(get_node_id_from_pmm)) ++ get_node_id_from_pmm ++ nodeList=() ++ local -a nodeList +++ kubectl_bin get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns=NAME:.metadata.name ++++ mktemp +++ local LAST_OUT=/tmp/tmp.iRTzAWdNTu ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wtgm0zpAON +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns=NAME:.metadata.name +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.iRTzAWdNTu +++ cat /tmp/tmp.wtgm0zpAON +++ rm 
/tmp/tmp.iRTzAWdNTu /tmp/tmp.wtgm0zpAON +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-25359 monitoring-pxc-0 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.dD3qwJTEW0 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.tRB8PVCpI3 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-25359 monitoring-pxc-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.dD3qwJTEW0 +++ cat /tmp/tmp.tRB8PVCpI3 +++ rm /tmp/tmp.dD3qwJTEW0 /tmp/tmp.tRB8PVCpI3 +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ jq -r .pmm_agent_status.node_id +++ kubectl_bin exec -n monitoring-2-0-25359 monitoring-pxc-1 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Jq4Z0Hrr4I ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7EzT7LVOaw +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-25359 monitoring-pxc-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.Jq4Z0Hrr4I +++ cat /tmp/tmp.7EzT7LVOaw +++ rm /tmp/tmp.Jq4Z0Hrr4I /tmp/tmp.7EzT7LVOaw +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-25359 monitoring-pxc-2 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.OIYgzVnUqk ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QjDnUpZLwf +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-25359 monitoring-pxc-2 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.OIYgzVnUqk +++ cat /tmp/tmp.QjDnUpZLwf +++ rm /tmp/tmp.OIYgzVnUqk /tmp/tmp.QjDnUpZLwf +++ return 0 ++ echo /node_id/177027f5-8bfd-42e8-b49e-0cb63d664ae6 /node_id/f7bcc431-2817-4570-a987-72eca27a307d /node_id/c3ba49b4-0ef2-4e63-a86b-3c94f9379804 + nodeList_from_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/177027f5-8bfd-42e8-b49e-0cb63d664ae6 /node_id/f7bcc431-2817-4570-a987-72eca27a307d /node_id/c3ba49b4-0ef2-4e63-a86b-3c94f9379804 ++ nodeList=("$@") ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/177027f5-8bfd-42e8-b49e-0cb63d664ae6 +++ awk '{print $4}' ++++ get_service_ip monitoring-service 
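The surrounding trace collects each PXC pod's PMM node ID and then cross-checks it against the PMM server inventory (the get_service_ip call traced next resolves the monitoring-service address used for that lookup). A minimal standalone sketch of the same round trip, assuming kubectl access to this namespace, the pmm-client sidecar in each pxc pod, and the admin:admin test credentials used in this run; NAMESPACE and PMM_IP below simply repeat the values already visible in the log:

NAMESPACE=monitoring-2-0-25359
PMM_IP=34.133.175.203   # LoadBalancer IP of service/monitoring-service
for pod in $(kubectl get pods -n "$NAMESPACE" --no-headers \
    -l app.kubernetes.io/component=pxc --output=custom-columns=NAME:.metadata.name); do
  # node_id registered by the pmm-agent running inside the pod
  node_id=$(kubectl exec -n "$NAMESPACE" "$pod" -c pmm-client -- \
    pmm-admin status --json | jq -r '.pmm_agent_status.node_id')
  # confirm the same node_id appears in the PMM server inventory
  kubectl exec -n "$NAMESPACE" monitoring-0 -- \
    pmm-admin --server-url="https://admin:admin@${PMM_IP}/" --server-insecure-tls \
    inventory list nodes --node-type=CONTAINER_NODE | grep "$node_id"
done

Before the cluster is paused, the '[ -z ... ]' checks traced below verify that each ID was found; after the pause the same does_node_id_exists pass comes back empty, which the later '[[ -n ... ]]' check confirms.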
++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.0w2os3uy8R ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.hHKeKTfIc2 +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.0w2os3uy8R +++++ cat /tmp/tmp.hHKeKTfIc2 +++++ rm /tmp/tmp.0w2os3uy8R /tmp/tmp.hHKeKTfIc2 +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.eWnwiXN2kb +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.ID3FlQSLsx ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.eWnwiXN2kb ++++ cat /tmp/tmp.ID3FlQSLsx ++++ rm /tmp/tmp.eWnwiXN2kb /tmp/tmp.ID3FlQSLsx ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.5yV0JAtVeU +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.QhDRV7psXi ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.5yV0JAtVeU ++++ cat /tmp/tmp.QhDRV7psXi ++++ rm /tmp/tmp.5yV0JAtVeU /tmp/tmp.QhDRV7psXi ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-25359 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.133.175.203/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.EhyLVrgZBA ++++ mktemp +++ local LAST_ERR=/tmp/tmp.c9W9lmFy7H +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-25359 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.133.175.203/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.EhyLVrgZBA +++ cat /tmp/tmp.c9W9lmFy7H +++ rm /tmp/tmp.EhyLVrgZBA /tmp/tmp.c9W9lmFy7H +++ return 0 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service +++ grep /node_id/f7bcc431-2817-4570-a987-72eca27a307d ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.OpWbGD48Jh ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.RtUy1UCUnJ +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service 
-o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.OpWbGD48Jh +++++ cat /tmp/tmp.RtUy1UCUnJ +++++ rm /tmp/tmp.OpWbGD48Jh /tmp/tmp.RtUy1UCUnJ +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.wurP7ze4PM +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.JKHntHy4eQ ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.wurP7ze4PM ++++ cat /tmp/tmp.JKHntHy4eQ ++++ rm /tmp/tmp.wurP7ze4PM /tmp/tmp.JKHntHy4eQ ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.C82lMY9RtB +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.oUkqfnidB3 ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.C82lMY9RtB ++++ cat /tmp/tmp.oUkqfnidB3 ++++ rm /tmp/tmp.C82lMY9RtB /tmp/tmp.oUkqfnidB3 ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-25359 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.133.175.203/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ZYRztGKLM5 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Cdl7vFEatv +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-25359 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.133.175.203/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.ZYRztGKLM5 +++ cat /tmp/tmp.Cdl7vFEatv +++ rm /tmp/tmp.ZYRztGKLM5 /tmp/tmp.Cdl7vFEatv +++ return 0 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/c3ba49b4-0ef2-4e63-a86b-3c94f9379804 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.ruANwiRqSh ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.qPPd27G0oa +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.ruANwiRqSh +++++ cat /tmp/tmp.qPPd27G0oa +++++ rm /tmp/tmp.ruANwiRqSh /tmp/tmp.qPPd27G0oa +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 
'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.p1ipSBqaPA +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.m5FFvNNNsQ ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.p1ipSBqaPA ++++ cat /tmp/tmp.m5FFvNNNsQ ++++ rm /tmp/tmp.p1ipSBqaPA /tmp/tmp.m5FFvNNNsQ ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.Wbda2cDXmL +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.2hgYg74RO1 ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.Wbda2cDXmL ++++ cat /tmp/tmp.2hgYg74RO1 ++++ rm /tmp/tmp.Wbda2cDXmL /tmp/tmp.2hgYg74RO1 ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-25359 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.133.175.203/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.7J4ghBgVPM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.hu3zo7vAux +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-25359 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.133.175.203/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.7J4ghBgVPM +++ cat /tmp/tmp.hu3zo7vAux +++ rm /tmp/tmp.7J4ghBgVPM /tmp/tmp.hu3zo7vAux +++ return 0 ++ echo /node_id/177027f5-8bfd-42e8-b49e-0cb63d664ae6 /node_id/f7bcc431-2817-4570-a987-72eca27a307d /node_id/c3ba49b4-0ef2-4e63-a86b-3c94f9379804 + for node_id in '"${nodeList_from_pmm[@]}"' + '[' -z /node_id/177027f5-8bfd-42e8-b49e-0cb63d664ae6 ']' + for node_id in '"${nodeList_from_pmm[@]}"' + '[' -z /node_id/f7bcc431-2817-4570-a987-72eca27a307d ']' + for node_id in '"${nodeList_from_pmm[@]}"' + '[' -z /node_id/c3ba49b4-0ef2-4e63-a86b-3c94f9379804 ']' + kubectl_bin patch pxc monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.7shSDJ2t91 ++ mktemp + local LAST_ERR=/tmp/tmp.nSLBjdu07d + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.7shSDJ2t91 perconaxtradbcluster.pxc.percona.com/monitoring patched + cat /tmp/tmp.nSLBjdu07d + rm /tmp/tmp.7shSDJ2t91 /tmp/tmp.nSLBjdu07d + return 0 + wait_for_delete pod/monitoring-pxc-0 + local res=pod/monitoring-pxc-0 + echo -n 'waiting for pod/monitoring-pxc-0 to be deleted' waiting for pod/monitoring-pxc-0 to be deleted+ set +o xtrace ..............Error from server (NotFound): pods "monitoring-pxc-0" not found + does_node_id_exists_in_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/177027f5-8bfd-42e8-b49e-0cb63d664ae6 /node_id/f7bcc431-2817-4570-a987-72eca27a307d /node_id/c3ba49b4-0ef2-4e63-a86b-3c94f9379804 ++ nodeList=("$@") ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- 
pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/177027f5-8bfd-42e8-b49e-0cb63d664ae6 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.ruyx8eepMG ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.88WKtL73n2 +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.ruyx8eepMG +++++ cat /tmp/tmp.88WKtL73n2 +++++ rm /tmp/tmp.ruyx8eepMG /tmp/tmp.88WKtL73n2 +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.JMN9ofsyK9 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.K1YWhsVnwu ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.JMN9ofsyK9 ++++ cat /tmp/tmp.K1YWhsVnwu ++++ rm /tmp/tmp.JMN9ofsyK9 /tmp/tmp.K1YWhsVnwu ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.gYWy8fqPKZ +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.ziQrLmYwZs ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.gYWy8fqPKZ ++++ cat /tmp/tmp.ziQrLmYwZs ++++ rm /tmp/tmp.gYWy8fqPKZ /tmp/tmp.ziQrLmYwZs ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-25359 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.133.175.203/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.di4twqfXkc ++++ mktemp +++ local LAST_ERR=/tmp/tmp.RxDyPHUPja +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-25359 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.133.175.203/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.di4twqfXkc +++ cat /tmp/tmp.RxDyPHUPja +++ rm /tmp/tmp.di4twqfXkc /tmp/tmp.RxDyPHUPja +++ return 0 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++ grep /node_id/f7bcc431-2817-4570-a987-72eca27a307d +++ awk '{print $4}' +++++ 
kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.XmHKO334Xo ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.NkelBEu90x +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.XmHKO334Xo +++++ cat /tmp/tmp.NkelBEu90x +++++ rm /tmp/tmp.XmHKO334Xo /tmp/tmp.NkelBEu90x +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.WExAueYMhV +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.Rpmi5H17s2 ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.WExAueYMhV ++++ cat /tmp/tmp.Rpmi5H17s2 ++++ rm /tmp/tmp.WExAueYMhV /tmp/tmp.Rpmi5H17s2 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.I54yRID5XT +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.K3WwZDHtEO ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.I54yRID5XT ++++ cat /tmp/tmp.K3WwZDHtEO ++++ rm /tmp/tmp.I54yRID5XT /tmp/tmp.K3WwZDHtEO ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-25359 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.133.175.203/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.yNixtRASA8 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2KYGB4kf4G +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-25359 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.133.175.203/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.yNixtRASA8 +++ cat /tmp/tmp.2KYGB4kf4G +++ rm /tmp/tmp.yNixtRASA8 /tmp/tmp.2KYGB4kf4G +++ return 0 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/c3ba49b4-0ef2-4e63-a86b-3c94f9379804 ++++ get_service_ip monitoring-service ++++ local service=monitoring-service +++ awk '{print $4}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.7j9mb5kihz ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.DTK4H5SXlL +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.7j9mb5kihz +++++ cat 
/tmp/tmp.DTK4H5SXlL +++++ rm /tmp/tmp.7j9mb5kihz /tmp/tmp.DTK4H5SXlL +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.9xfG4520Xi +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.A4ZshlzBRf ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.9xfG4520Xi ++++ cat /tmp/tmp.A4ZshlzBRf ++++ rm /tmp/tmp.9xfG4520Xi /tmp/tmp.A4ZshlzBRf ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.QhZfZhXoxP +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.fwgZOFIVDN ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.QhZfZhXoxP ++++ cat /tmp/tmp.fwgZOFIVDN ++++ rm /tmp/tmp.QhZfZhXoxP /tmp/tmp.fwgZOFIVDN ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-25359 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.133.175.203/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.PLSEY9eGTH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gCNXRwBFvD +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-25359 monitoring-0 -- pmm-admin --server-url=https://admin:admin@34.133.175.203/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.PLSEY9eGTH +++ cat /tmp/tmp.gCNXRwBFvD +++ rm /tmp/tmp.PLSEY9eGTH /tmp/tmp.gCNXRwBFvD +++ return 0 ++ echo + [[ -n '' ]] + helm uninstall monitoring release "monitoring" uninstalled + destroy monitoring-2-0-25359 + local namespace=monitoring-2-0-25359 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources ----------------------------------------------------------------------------------- + '[' true == false -o 1 == 1 ']' + grep -v 'get backup status: Job.batch' + sort -u + tee /tmp/tmp.7PFIFwbwjt/operator.log + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + grep -v level=info + grep -v 'the object has been modified' ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name' ++ head -1 ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.124Tets3Rq +++ mktemp ++ local LAST_ERR=/tmp/tmp.VeXFIjkpdh ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods 
--selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.124Tets3Rq ++ cat /tmp/tmp.VeXFIjkpdh ++ rm /tmp/tmp.124Tets3Rq /tmp/tmp.VeXFIjkpdh ++ return 0 + kubectl_bin logs -n pxc-operator percona-xtradb-cluster-operator-9d9fbdb5-4wm8r ++ mktemp + local LAST_OUT=/tmp/tmp.1sOqoZdjgK ++ mktemp + local LAST_ERR=/tmp/tmp.00V96jNvne + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs -n pxc-operator percona-xtradb-cluster-operator-9d9fbdb5-4wm8r + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.1sOqoZdjgK + cat /tmp/tmp.00V96jNvne + rm /tmp/tmp.1sOqoZdjgK /tmp/tmp.00V96jNvne + return 0 2026-05-16T19:55:24.287Z INFO setup Feature gates {"PXCO_FEATURE_GATES": "", "enabled": ""} 2026-05-16T19:55:24.287Z INFO setup Manager starting up {"gitCommit": "3dc7f023721e421071d4d2126e295ac1467895dd", "gitBranch": "PR-2467-3dc7f023", "buildTime": "2026-05-16T18:00:40Z", "goVersion": "go1.26.3", "os": "linux", "arch": "amd64"} 2026-05-16T19:55:24.287Z INFO setup Runs on {"platform": "kubernetes", "version": "v1.33.11-gke.1137000"} 2026-05-16T19:55:24.290Z INFO setup Registering Components. 2026-05-16T19:55:24.786Z INFO controller-runtime.webhook Registering webhook {"path": "/validate-percona-xtradbcluster"} 2026-05-16T19:55:24.787Z INFO controller-runtime.certwatcher Starting certificate poll+watcher {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key", "interval": "10s"} 2026-05-16T19:55:24.787Z INFO controller-runtime.certwatcher Updated current TLS certificate {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key"} 2026-05-16T19:55:24.787Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":8080", "secure": false} 2026-05-16T19:55:24.787Z INFO controller-runtime.metrics Starting metrics server 2026-05-16T19:55:24.787Z INFO controller-runtime.webhook Serving webhook server {"host": "", "port": 9443} 2026-05-16T19:55:24.787Z INFO controller-runtime.webhook Starting webhook server 2026-05-16T19:55:24.787Z INFO setup Starting the Cmd. 2026-05-16T19:55:24.787Z INFO starting server {"name": "health probe", "addr": "[::]:8081"} 2026-05-16T19:55:24.887Z INFO Attempting to acquire leader lease... 
{"lock": "pxc-operator/08db1feb.percona.com"} 2026-05-16T19:55:24.917Z DEBUG events percona-xtradb-cluster-operator-9d9fbdb5-4wm8r_c86cd8f9-aba0-4b13-acb8-c81990908fd2 became leader {"type": "Normal", "object": {"kind":"Lease","namespace":"pxc-operator","name":"08db1feb.percona.com","uid":"89700376-db2b-46b8-b6c6-632a30de5c75","apiVersion":"coordination.k8s.io/v1","resourceVersion":"1778961324911711009"}, "reason": "LeaderElection"} 2026-05-16T19:55:24.917Z INFO Starting EventSource {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "source": "kind source: *v1.PerconaXtraDBClusterBackup"} 2026-05-16T19:55:24.917Z INFO Starting EventSource {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "source": "kind source: *v1.PerconaXtraDBCluster"} 2026-05-16T19:55:24.917Z INFO Starting EventSource {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "source": "kind source: *v1.Secret"} 2026-05-16T19:55:24.917Z INFO Starting EventSource {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "source": "kind source: *v1.PerconaXtraDBClusterRestore"} 2026-05-16T19:55:24.917Z INFO Successfully acquired lease {"lock": "pxc-operator/08db1feb.percona.com"} 2026-05-16T19:55:25.118Z INFO Starting Controller {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup"} 2026-05-16T19:55:25.118Z INFO Starting Controller {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster"} 2026-05-16T19:55:25.118Z INFO Starting Controller {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore"} 2026-05-16T19:55:25.118Z INFO Starting workers {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "worker count": 1} 2026-05-16T19:55:25.118Z INFO Starting workers {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "worker count": 1} 2026-05-16T19:55:25.118Z INFO Starting workers {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "worker count": 1} 2026-05-16T19:57:32.278Z INFO Set CR version {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "b75ba11d-393b-438e-a22f-cc2edd6012d3", "version": "1.20.0"} 2026-05-16T19:57:35.141Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "b75ba11d-393b-438e-a22f-cc2edd6012d3", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"} 2026-05-16T19:57:35.197Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": 
{"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "b75ba11d-393b-438e-a22f-cc2edd6012d3", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"} 2026-05-16T19:57:35.301Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "b75ba11d-393b-438e-a22f-cc2edd6012d3", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-05-16T19:57:35.366Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "b75ba11d-393b-438e-a22f-cc2edd6012d3", "object": "monitoring-pxc-unready", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-05-16T19:57:35.406Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "b75ba11d-393b-438e-a22f-cc2edd6012d3", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-05-16T19:57:35.507Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "b75ba11d-393b-438e-a22f-cc2edd6012d3", "object": "monitoring-haproxy-replicas", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-05-16T19:57:36.506Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "29a76d41-7d06-41bb-96cb-76c1e5ac94c8", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2026-05-16T19:57:36.526Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "29a76d41-7d06-41bb-96cb-76c1e5ac94c8", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2026-05-16T19:58:22.350Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "7c54908e-723c-4747-b0fe-3539b926a584", "user": "operator"} 2026-05-16T19:58:22.379Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": 
{"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "7c54908e-723c-4747-b0fe-3539b926a584", "user": "monitor"} 2026-05-16T19:58:22.420Z INFO User monitor: granted privileges {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "7c54908e-723c-4747-b0fe-3539b926a584"} 2026-05-16T19:58:22.452Z INFO monitor user privileges granted {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "7c54908e-723c-4747-b0fe-3539b926a584"} 2026-05-16T19:58:22.480Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "7c54908e-723c-4747-b0fe-3539b926a584", "user": "xtrabackup"} 2026-05-16T19:58:22.517Z INFO User xtrabackup: granted privileges {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "7c54908e-723c-4747-b0fe-3539b926a584"} 2026-05-16T19:58:22.547Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "7c54908e-723c-4747-b0fe-3539b926a584", "user": "replication"} 2026-05-16T19:58:24.554Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "7c54908e-723c-4747-b0fe-3539b926a584", "err": "get primary pxc pod: failed to get proxy connection: dial tcp 34.118.226.95:3306: connect: connection refused"} 2026-05-16T20:01:11.564Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "962e2e95-7d99-4455-8433-a93a50008bb8", "user": "root"} 2026-05-16T20:01:11.691Z INFO update PXC version (fetched from db) {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "962e2e95-7d99-4455-8433-a93a50008bb8", "new version": "8.0.43-34.1"} 2026-05-16T20:04:42.615Z INFO Internal secrets updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", 
"PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "3373d843-bd76-480a-9c67-3748e9230d53", "user": "pmmserverkey"} 2026-05-16T20:04:42.895Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "f4ea9f27-c9ca-4c77-8918-cbe57d93d8dd", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2026-05-16T20:04:42.969Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "f4ea9f27-c9ca-4c77-8918-cbe57d93d8dd", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2026-05-16T20:04:43.098Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "f4ea9f27-c9ca-4c77-8918-cbe57d93d8dd", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2026-05-16T20:05:41.788Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "cf462679-776d-4ce6-bd19-3a927464ba8c", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-1: dial tcp: lookup monitoring-pxc-1.monitoring-pxc.monitoring-2-0-25359 on 34.118.224.10:53: no such host"} 2026-05-16T20:07:03.094Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "b5134fbe-cf31-461e-872f-d34794a9f34f", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-0: dial tcp: lookup monitoring-pxc-0.monitoring-pxc.monitoring-2-0-25359 on 34.118.224.10:53: no such host"} 2026-05-16T20:07:50.324Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "c7254c0a-5abd-4df9-b5a0-3bc67cc6bfd2", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2026-05-16T20:07:50.395Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": 
"monitoring-2-0-25359", "name": "monitoring", "reconcileID": "c7254c0a-5abd-4df9-b5a0-3bc67cc6bfd2", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2026-05-16T20:07:50.502Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "c7254c0a-5abd-4df9-b5a0-3bc67cc6bfd2", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2026-05-16T20:08:53.343Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "b7d8cc40-cd61-40fb-8db4-485e143bbcda", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-1: dial tcp: lookup monitoring-pxc-1.monitoring-pxc.monitoring-2-0-25359 on 34.118.224.10:53: no such host"} 2026-05-16T20:09:45.945Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "93e73e83-8676-427a-bf82-c9f988dfece1", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-0: invalid connection"} 2026-05-16T20:10:37.897Z INFO Password changed, updating user {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "f708bc9d-d64c-4187-b375-e01691ac1e5f", "user": "pmmserverkey"} 2026-05-16T20:10:37.914Z INFO HAProxy pods will be restarted {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "f708bc9d-d64c-4187-b375-e01691ac1e5f", "last-applied-secret": "b4a0fd68b555a39e346f3bc93f601af73161a54d65f11198aa59e11d0dda74f4"} 2026-05-16T20:10:37.914Z INFO Internal secrets updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "f708bc9d-d64c-4187-b375-e01691ac1e5f", "user": "pmmserverkey"} 2026-05-16T20:10:37.914Z INFO PXC pods will be restarted {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "f708bc9d-d64c-4187-b375-e01691ac1e5f", "last-applied-secret": "b4a0fd68b555a39e346f3bc93f601af73161a54d65f11198aa59e11d0dda74f4"} 2026-05-16T20:10:37.916Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": 
"pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "f708bc9d-d64c-4187-b375-e01691ac1e5f", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2026-05-16T20:10:37.994Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "f708bc9d-d64c-4187-b375-e01691ac1e5f", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2026-05-16T20:11:37.936Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "d9b54583-e71a-4138-a112-b513d0c74169", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-1: dial tcp: lookup monitoring-pxc-1.monitoring-pxc.monitoring-2-0-25359 on 34.118.224.10:53: no such host"} 2026-05-16T20:11:43.658Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "3f85495a-0381-4ed2-af53-d4c880f96cfc", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-1: dial tcp: lookup monitoring-pxc-1.monitoring-pxc.monitoring-2-0-25359 on 34.118.224.10:53: no such host"} 2026-05-16T20:17:44.035Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "a5d20eea-5176-43de-be0b-2ca939a47eea", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2026-05-16T20:17:44.094Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "a5d20eea-5176-43de-be0b-2ca939a47eea", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2026-05-16T20:17:44.176Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-25359"}, "namespace": "monitoring-2-0-25359", "name": "monitoring", "reconcileID": "a5d20eea-5176-43de-be0b-2ca939a47eea", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} [mysql] 2026/05/16 20:07:02 packets.go:58 unexpected EOF [mysql] 2026/05/16 20:09:45 packets.go:58 read 
tcp 10.116.114.41:50898->10.116.114.46:33062: read: connection reset by peer -  }, -  { -  }, +  }, -  Annotations: map[string]string{ +  Annotations: map[string]string{ +  APIVersion: "", -  APIVersion: "apps/v1", -  APIVersion: "apps/v1", -  APIVersion: "v1", +  AvailableReplicas: 0, -  AvailableReplicas: 2, -  AvailableReplicas: 3, -  CollisionCount: &0, +  CollisionCount: nil, +  CreationTimestamp: v1.Time{}, -  CreationTimestamp: v1.Time{Time: s"2026-05-16 19:57:35 +0000 UTC"}, +  CurrentReplicas: 0, -  CurrentReplicas: 2, -  CurrentReplicas: 3, +  CurrentRevision: "", -  CurrentRevision: "monitoring-haproxy-575bdf49d4", -  CurrentRevision: "monitoring-haproxy-5f8674d8fb", -  CurrentRevision: "monitoring-haproxy-7b8468fb86", -  CurrentRevision: "monitoring-haproxy-9bf455dbb", -  CurrentRevision: "monitoring-pxc-68d86bb54b", -  CurrentRevision: "monitoring-pxc-6b445d4f6b", -  CurrentRevision: "monitoring-pxc-6b5fc7d956", -  CurrentRevision: "monitoring-pxc-7966779fd4", -  DefaultMode: &420, -  DefaultMode: &420, +  DefaultMode: nil, +  DefaultMode: nil, +  DeprecatedServiceAccount: "", -  DeprecatedServiceAccount: "default", +  DNSPolicy: "", -  DNSPolicy: "ClusterFirst", -  FieldsType: "FieldsV1", -  FieldsType: "FieldsV1", -  FieldsV1: s`{"f:metadata":{"f:annotations":{".":{},"f:percona.com/last-confi`..., -  FieldsV1: s`{"f:status":{"f:availableReplicas":{},"f:collisionCount":{},"f:c`..., +  Generation: 0, -  Generation: 1, -  Generation: 2, -  Generation: 3, -  Generation: 4, -  Key: "pmmserver", +  Key: "pmmserverkey", +  "last-applied-secret": "b4a0fd68b555a39e346f3bc93f601af73161a54d65f11198aa59e11d0dda74f4", +  ManagedFields: nil, -  ManagedFields: []v1.ManagedFieldsEntry{ -  Manager: "kube-controller-manager", -  Manager: "percona-xtradb-cluster-operator", +  ObservedGeneration: 0, -  ObservedGeneration: 1, -  ObservedGeneration: 2, -  ObservedGeneration: 3, -  ObservedGeneration: 4, -  Operation: "Update", -  Operation: "Update", +  "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0", +  "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6MCwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBw"..., +  "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6MCwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3Vi"..., -  "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBw"..., +  "percona.com/last-config-hash": 
"eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsImxhc3QtYXBwbGllZC1zZWNyZXQiOiJiNGEwZmQ2OGI1NTVhMzllMzQ2ZjNiYzkzZjYwMWFmNzMxNjFhNTRkNjVmMTExOThhYTU5ZTExZDBkZGE3NGY0IiwicGVyY29uYS5jb20vY29uZmlndXJhdGlvbi1oYXNoIjoi"..., -  "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFi"..., +  "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFi"..., -  "percona.com/last-config-hash": 
"eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIn19LCJzcGVjIjp7InZvbHVtZXMiOlt7Im5hbWUiOiJoYXByb3h5LWN1c3RvbSIsImNvbmZpZ01h"..., -  "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIn19LCJzcGVjIjp7InZvbHVtZXMiOlt7Im5hbWUiOiJoYXByb3h5LWN1c3RvbSIsImNvbmZpZ01hcCI6eyJuYW1lIjoibW9uaXRvcmluZy1oYXByb3h5Iiwib3B0aW9uYWwiOnRydWV9fSx7Im5hbWUiOiJoYXByb3h5LWF1dG8iLCJlbXB0eURpciI6e319LHsibmFtZSI6Im15c3FsLXVzZXJzLXNlY3JldC1maWxlIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJpbnRlcm5hbC1tb25pdG9yaW5nIiwib3B0aW9uYWwiOmZhbHNlfX0seyJuYW1lIjoibXktZW52LXZhci1zZWNyZXRzIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJteS1lbnYtdmFyLXNlY3JldHMiLCJvcHRpb25hbCI6dHJ1ZX19LHsibmFtZSI6ImJpbiIsImVtcHR5RGlyIjp7fX1dLCJpbml0Q29udGFpbmVycyI6W3sibmFtZSI6InB4Yy1pbml0IiwiaW1hZ2UiOiJwZXJjb25hbGFiL3BlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3I6UFItMjQ2Ny0zZGM3ZjAyMyIsImNvbW1hbmQiOlsiL3B4Yy1pbml0LWVudHJ5cG9pbnQuc2giXSwicmVzb3VyY2VzIjp7ImxpbWl0cyI6eyJjcHUiOiI1MG0iLCJtZW1vcnkiOiI1ME0ifX0sInZvbHVtZU1vdW50cyI6W3sibmFtZSI6ImJpbiIsIm1vdW50UGF0aCI6Ii92YXIvbGliL215c3FsIn1dLCJpbWFnZVB1bGxQb2xpY3kiOiJBbHdheXMifSx7Im5hbWUiOiJoYXByb3h5LWluaXQiLCJpbWFnZSI6InBlcmNvbmFsYWIvcGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvcjpQUi0yNDY3LTNkYzdmMDIzIiwiY29tbWFuZCI6WyIvaGFwcm94eS1pbml0LWVudHJ5cG9pbnQuc2giXSwicmVzb3VyY2VzIjp7ImxpbWl0cyI6eyJjcHUiOiI1MG0iLCJtZW1vcnkiOiI1ME0ifX0sInZvbHVtZU1vdW50cyI6W3sibmFtZSI6ImJpbiIsIm1vdW50UGF0aCI6Ii9vcHQvcGVyY29uYSJ9XSwiaW1hZ2VQdWxsUG9saWN5IjoiQWx3YXlzIn1dLCJjb250YWluZXJzIjpbeyJuYW1lIjoicG1tLWNsaWVudCIsImltYWdlIjoicGVyY29uYWxhYi9wbW0tY2xpZW50OmRldi1sYXRlc3QiLCJwb3J0cyI6W3siY29udGFpbmVyUG9ydCI6Nzc3N30seyJjb250YWluZXJQb3J0IjozMDEwMH0seyJjb250YWluZXJQb3J0IjozMDEwMX0seyJjb250YWluZXJQb3J0IjozMDEwMn0seyJjb250YWluZXJQb3J0IjozMDEwM30seyJjb250YWluZXJQb3J0IjozMDEwNH0s
eyJjb250YWluZXJQb3J0IjozMDEwNX1dLCJlbnZGcm9tIjpbeyJzZWNyZXRSZWYiOnsibmFtZSI6Im15LWVudi12YXItc2VjcmV0cyIsIm9wdGlvbmFsIjp0cnVlfX1dLCJlbnYiOlt7Im5hbWUiOiJQTU1fU0VSVkVSIiwidmFsdWUiOiJtb25pdG9yaW5nLXNlcnZpY2UifSx7Im5hbWUiOiJQTU1fVVNFUiIsInZhbHVlIjoiYWRtaW4ifSx7Im5hbWUiOiJQTU1fUEFTU1dPUkQiLCJ2YWx1ZUZyb20iOnsic2VjcmV0S2V5UmVmIjp7Im5hbWUiOiJpbnRlcm5hbC1tb25pdG9yaW5nIiwia2V5IjoicG1tc2VydmVyIn19fSx7Im5hbWUiOiJDTElFTlRfUE9SVF9MSVNURU4iLCJ2YWx1ZSI6Ijc3NzcifSx7Im5hbWUiOiJDTElFTlRfUE9SVF9NSU4iLCJ2YWx1ZSI6IjMwMTAwIn0seyJuYW1lIjoiQ0xJRU5UX1BPUlRfTUFYIiwidmFsdWUiOiIzMDEwNSJ9LHsibmFtZSI6IlBPRF9OQU1FIiwidmFsdWVGcm9tIjp7ImZpZWxkUmVmIjp7ImZpZWxkUGF0aCI6Im1ldGFkYXRhLm5hbWUifX19LHsibmFtZSI6IlBPRF9OQU1FU1BBU0UiLCJ2YWx1ZUZyb20iOnsiZmllbGRSZWYiOnsiZmllbGRQYXRoIjoibWV0YWRhdGEubmFtZXNwYWNlIn19fSx7Im5hbWUiOiJQTU1fQUdFTlRfU0VSVkVSX0FERFJFU1MiLCJ2YWx1ZSI6Im1vbml0b3Jpbmctc2VydmljZSJ9LHsibmFtZSI6IlBNTV9BR0VOVF9TRVJWRVJfVVNFUk5BTUUiLCJ2YWx1ZSI6ImFkbWluIn0seyJuYW1lIjoiUE1NX0FHRU5UX1NFUlZFUl9QQVNTV09SRCIsInZhbHVlRnJvbSI6eyJzZWNyZXRLZXlSZWYiOnsibmFtZSI6ImludGVybmFsLW1vbml0b3JpbmciLCJrZXkiOiJwbW1zZXJ2ZXIifX19LHsibmFtZSI6IlBNTV9BR0VOVF9MSVNURU5fUE9SVCIsInZhbHVlIjoiNzc3NyJ9LHsibmFtZSI6IlBNTV9BR0VOVF9QT1JUU19NSU4iLCJ2YWx1ZSI6IjMwMTAwIn0seyJuYW1lIjoiUE1NX0FHRU5UX1BPUlRTX01BWCIsInZhbHVlIjoiMzAxMDUifSx7Im5hbWUiOiJQTU1fQUdFTlRfQ09ORklHX0ZJTEUiLCJ2YWx1ZSI6Ii91c3IvbG9jYWwvcGVyY29uYS9wbW0yL2NvbmZpZy9wbW0tYWdlbnQueWFtbCJ9LHsibmFtZSI6IlBNTV9BR0VOVF9TRVJWRVJfSU5TRUNVUkVfVExTIiwidmFsdWUiOiIxIn0seyJuYW1lIjoiUE1NX0FHRU5UX0xJU1RFTl9BRERSRVNTIiwidmFsdWUiOiIwLjAuMC4wIn0seyJuYW1lIjoiUE1NX0FHRU5UX1NFVFVQX01FVFJJQ1NfTU9ERSIsInZhbHVlIjoicHVz"..., +  "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIn19LCJzcGVjIjp7InZvbHVtZXMiOlt7Im5hbWUiOiJoYXByb3h5LWN1c3RvbSIsImNvbmZpZ01hcCI6eyJuYW1lIjoibW9uaXRvcmluZy1oYXByb3h5Iiwib3B0aW9uYWwiOnRydWV9fSx7Im5hbWUiOiJoYXByb3h5LWF1dG8iLCJlbXB0eURpciI6e319LHsibmFtZSI6Im15c3FsLXVzZXJzLXNlY3JldC1maWxlIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJpbnRlcm5hbC1tb25pdG9yaW5nIiwib3B0aW9uYWwiOmZhbHNlfX0seyJuYW1lIjoibXktZW52LXZhci1zZWNyZXRzIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJteS1lbnYtdmFyLXNlY3JldHMiLCJvcHRpb25hbCI6dHJ1ZX19LHsibmFtZSI6ImJpbiIsImVtcHR5RGlyIjp7fX1dLCJpbml0Q29udGFpbmVycyI6W3sibmFtZSI6InB4Yy1pbml0IiwiaW1hZ2UiOiJwZXJjb25hbGFiL3BlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3I6UFItMjQ2Ny0zZGM3ZjAyMyIsImNvbW1hbmQiOlsiL3B4Yy1pbml0LWVudHJ5cG9pbnQuc2giXSwicmVzb3VyY2VzIjp7ImxpbWl0cyI6eyJjcHUiOiI1MG0iLCJtZW1vcnkiOiI1ME0ifX0sInZvbHVtZU1vdW50cyI6W3sibmFtZSI6ImJpbiIsIm1vdW50UGF0aCI6Ii92YXIvbGliL215c3FsIn1dLCJpbWFnZVB1bGxQb2xpY3kiOiJBbHdheXMifSx7Im5hbWUiOiJoYXByb3h5LWluaXQiLCJpbWFnZSI6InBlcmNvbmFsYWIvcGVyY29uYS14dHJhZGItY2x1c3Rlci1vc
GVyYXRvcjpQUi0yNDY3LTNkYzdmMDIzIiwiY29tbWFuZCI6WyIvaGFwcm94eS1pbml0LWVudHJ5cG9pbnQuc2giXSwicmVzb3VyY2VzIjp7ImxpbWl0cyI6eyJjcHUiOiI1MG0iLCJtZW1vcnkiOiI1ME0ifX0sInZvbHVtZU1vdW50cyI6W3sibmFtZSI6ImJpbiIsIm1vdW50UGF0aCI6Ii9vcHQvcGVyY29uYSJ9XSwiaW1hZ2VQdWxsUG9saWN5IjoiQWx3YXlzIn1dLCJjb250YWluZXJzIjpbeyJuYW1lIjoicG1tLWNsaWVudCIsImltYWdlIjoicGVyY29uYWxhYi9wbW0tY2xpZW50OmRldi1sYXRlc3QiLCJwb3J0cyI6W3siY29udGFpbmVyUG9ydCI6Nzc3N30seyJjb250YWluZXJQb3J0IjozMDEwMH0seyJjb250YWluZXJQb3J0IjozMDEwMX0seyJjb250YWluZXJQb3J0IjozMDEwMn0seyJjb250YWluZXJQb3J0IjozMDEwM30seyJjb250YWluZXJQb3J0IjozMDEwNH0seyJjb250YWluZXJQb3J0IjozMDEwNX1dLCJlbnZGcm9tIjpbeyJzZWNyZXRSZWYiOnsibmFtZSI6Im15LWVudi12YXItc2VjcmV0cyIsIm9wdGlvbmFsIjp0cnVlfX1dLCJlbnYiOlt7Im5hbWUiOiJQTU1fU0VSVkVSIiwidmFsdWUiOiJtb25pdG9yaW5nLXNlcnZpY2UifSx7Im5hbWUiOiJQTU1fVVNFUiIsInZhbHVlIjoiYXBpX2tleSJ9LHsibmFtZSI6IlBNTV9QQVNTV09SRCIsInZhbHVlRnJvbSI6eyJzZWNyZXRLZXlSZWYiOnsibmFtZSI6ImludGVybmFsLW1vbml0b3JpbmciLCJrZXkiOiJwbW1zZXJ2ZXJrZXkifX19LHsibmFtZSI6IkNMSUVOVF9QT1JUX0xJU1RFTiIsInZhbHVlIjoiNzc3NyJ9LHsibmFtZSI6IkNMSUVOVF9QT1JUX01JTiIsInZhbHVlIjoiMzAxMDAifSx7Im5hbWUiOiJDTElFTlRfUE9SVF9NQVgiLCJ2YWx1ZSI6IjMwMTA1In0seyJuYW1lIjoiUE9EX05BTUUiLCJ2YWx1ZUZyb20iOnsiZmllbGRSZWYiOnsiZmllbGRQYXRoIjoibWV0YWRhdGEubmFtZSJ9fX0seyJuYW1lIjoiUE9EX05BTUVTUEFTRSIsInZhbHVlRnJvbSI6eyJmaWVsZFJlZiI6eyJmaWVsZFBhdGgiOiJtZXRhZGF0YS5uYW1lc3BhY2UifX19LHsibmFtZSI6IlBNTV9BR0VOVF9TRVJWRVJfQUREUkVTUyIsInZhbHVlIjoibW9uaXRvcmluZy1zZXJ2aWNlIn0seyJuYW1lIjoiUE1NX0FHRU5UX1NFUlZFUl9VU0VSTkFNRSIsInZhbHVlIjoiYXBpX2tleSJ9LHsibmFtZSI6IlBNTV9BR0VOVF9TRVJWRVJfUEFTU1dPUkQiLCJ2YWx1ZUZyb20iOnsic2VjcmV0S2V5UmVmIjp7Im5hbWUiOiJpbnRlcm5hbC1tb25pdG9yaW5nIiwia2V5IjoicG1tc2VydmVya2V5In19fSx7Im5hbWUiOiJQTU1fQUdFTlRfTElTVEVOX1BPUlQiLCJ2YWx1ZSI6Ijc3NzcifSx7Im5hbWUiOiJQTU1fQUdFTlRfUE9SVFNfTUlOIiwidmFsdWUiOiIzMDEwMCJ9LHsibmFtZSI6IlBNTV9BR0VOVF9QT1JUU19NQVgiLCJ2YWx1ZSI6IjMwMTA1In0seyJuYW1lIjoiUE1NX0FHRU5UX0NPTkZJR19GSUxFIiwidmFsdWUiOiIvdXNyL2xvY2FsL3BlcmNvbmEvcG1tMi9jb25maWcvcG1tLWFnZW50LnlhbWwifSx7Im5hbWUiOiJQTU1fQUdFTlRfU0VSVkVSX0lOU0VDVVJFX1RMUyIsInZhbHVlIjoiMSJ9LHsibmFtZSI6IlBNTV9BR0VOVF9MSVNURU5fQUREUkVTUyIsInZhbHVlIjoiMC4wLjAuMCJ9LHsibmFtZSI6IlBNTV9BR0VOVF9TRVRVUF9NRVRSSUNTX01PREUiLCJ2"..., -  "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3Vi"..., +  "percona.com/last-config-hash": 
"eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsImxhc3QtYXBwbGllZC1zZWNyZXQiOiJiNGEwZmQ2OGI1NTVhMzllMzQ2ZjNiYzkzZjYwMWFmNzMxNjFhNTRkNjVmMTExOThhYTU5ZTExZDBkZGE3NGY0IiwicGVyY29uYS5jb20vY29uZmlndXJhdGlvbi1oYXNoIjoiZDQxZDhjZDk4ZjAw"..., -  "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vc3NsLWhhc2giOiIzMjI4Njk1OTE4MDhjNTA4Nzk1ZDg0MGQ2OTVlMzAwMCIsInBlcmNvbmEu"..., -  "percona.com/last-config-hash": 
"eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vc3NsLWhhc2giOiIzMjI4Njk1OTE4MDhjNTA4Nzk1ZDg0MGQ2OTVlMzAwMCIsInBlcmNvbmEuY29tL3NzbC1pbnRlcm5hbC1oYXNoIjoiZGU2M2FlNDY3NjcxMDA3YWM2NDRhMDI3YjhhY2U0MjAifX0sInNwZWMiOnsidm9sdW1lcyI6W3sibmFtZSI6InRtcCIsImVtcHR5RGlyIjp7fX0seyJuYW1lIjoiY29uZmlnIiwiY29uZmlnTWFwIjp7Im5hbWUiOiJtb25pdG9yaW5nLXB4YyIsIm9wdGlvbmFsIjp0cnVlfX0seyJuYW1lIjoic3NsLWludGVybmFsIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJtb25pdG9yaW5nLXNzbC1pbnRlcm5hbCIsIm9wdGlvbmFsIjp0cnVlfX0seyJuYW1lIjoic3NsIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJzb21lLW5hbWUtc3NsIiwib3B0aW9uYWwiOmZhbHNlfX0seyJuYW1lIjoiYXV0by1jb25maWciLCJjb25maWdNYXAiOnsibmFtZSI6ImF1dG8tbW9uaXRvcmluZy1weGMiLCJvcHRpb25hbCI6dHJ1ZX19LHsibmFtZSI6InZhdWx0LWtleXJpbmctc2VjcmV0Iiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJtb25pdG9yaW5nLXZhdWx0Iiwib3B0aW9uYWwiOnRydWV9fSx7Im5hbWUiOiJteXNxbC11c2Vycy1zZWNyZXQtZmlsZSIsInNlY3JldCI6eyJzZWNyZXROYW1lIjoiaW50ZXJuYWwtbW9uaXRvcmluZyIsIm9wdGlvbmFsIjpmYWxzZX19LHsibmFtZSI6Im15c3FsLWluaXQtZmlsZSIsInNlY3JldCI6eyJzZWNyZXROYW1lIjoibW9uaXRvcmluZy1teXNxbC1pbml0Iiwib3B0aW9uYWwiOnRydWV9fSx7Im5hbWUiOiJiaW4iLCJlbXB0eURpciI6e319XSwiaW5pdENvbnRhaW5lcnMiOlt7Im5hbWUiOiJweGMtaW5pdCIsImltYWdlIjoicGVyY29uYWxhYi9wZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yOlBSLTI0NjctM2RjN2YwMjMiLCJjb21tYW5kIjpbIi9weGMtaW5pdC1lbnRyeXBvaW50LnNoIl0sInJlc291cmNlcyI6eyJsaW1pdHMiOnsiY3B1IjoiNTBtIiwibWVtb3J5IjoiNTBNIn19LCJ2b2x1bWVNb3VudHMiOlt7Im5hbWUiOiJkYXRhZGlyIiwibW91bnRQYXRoIjoiL3Zhci9saWIvbXlzcWwifSx7Im5hbWUiOiJiaW4iLCJtb3VudFBhdGgiOiIvb3B0L3BlcmNvbmEifV0sImltYWdlUHVsbFBvbGljeSI6IkFsd2F5cyJ9XSwiY29udGFpbmVycyI6W3sibmFtZSI6InBtbS1jbGllbnQiLCJpbWFnZSI6InBlcmNvbmFsYWIvcG1tLWNsaWVudDpkZXYtbGF0ZXN0IiwicG9ydHMiOlt7ImNvbnRhaW5lclBvcnQiOjc3Nzd9LHsiY29udGFpbmVyUG9ydCI6MzAxMDB9LHsiY29udGFpbmVyUG9ydCI6MzAxMDF9LHsiY29udGFpbmVyUG9ydCI6MzAxMDJ9LHsiY29udGFpbmVyUG9ydCI6MzAxMDN9LHsiY29udGFpbmVyUG9ydCI6MzAxMDR9LHsiY29udGFpbmVyUG9ydCI6MzAxMDV9XSwiZW52RnJvbSI6W3sic2VjcmV0UmVmIjp7Im5hbWUiOiJteS1lbnYtdmFyLXNlY3JldHMiLCJvcHRpb25hbCI6dHJ1ZX19XSwiZW52IjpbeyJuYW1lIjoiUE1NX1NFUlZFUiIsInZhbHVlIjoibW9uaXRvcmluZy1zZXJ2aWNlIn0seyJuYW1lIjoiUE1NX1VTRVIiLCJ2YWx1ZSI6ImFkbWluIn0seyJuYW1lIjoiUE1NX1BBU1NXT1JEIiwidmFsdWVGcm9tIjp7InNlY3JldEtleVJlZiI6eyJuYW1lIjoiaW50ZXJuYWwtbW9uaXRvcmluZyIsImtleSI6InBtbXNlcnZlciJ9fX0seyJuYW1lIjoiQ0xJRU5UX1BPUlRfTElTVEVOIiwidmFsdWUiOiI3Nzc3In0seyJuYW1lIjoiQ0xJRU5UX1BPUlRfTUlOIiwidmFsdWUiOiIzMDEwMCJ9LHsibmFtZSI6IkNMSUVOVF9QT1JUX01BWCIsInZhbHVlIjoiMzAxMDUifSx7Im5hbWUiOiJQT0RfTkFNRSIsInZhbHVlRnJvbSI6eyJmaWVsZFJlZiI6eyJmaWVsZFBhdGgiOiJtZXRhZGF0YS5uYW1lIn19fSx7Im5hbWUiOiJQT0RfTkFNRVNQQVNFIiwidmFsdWVGcm9tIjp7ImZpZWxkUmVmIjp7ImZpZWxkUGF0aCI6Im1ldGFkYXRhLm5hbWVzcGFjZSJ9fX0seyJuYW1lIjoiUE1NX0FHRU
5UX1NFUlZFUl9BRERSRVNTIiwidmFsdWUiOiJtb25pdG9yaW5nLXNlcnZpY2UifSx7Im5hbWUiOiJQTU1fQUdFTlRfU0VSVkVSX1VTRVJOQU1FIiwidmFsdWUiOiJhZG1pbiJ9LHsibmFtZSI6IlBNTV9BR0VOVF9TRVJWRVJfUEFTU1dPUkQiLCJ2YWx1ZUZyb20iOnsic2VjcmV0S2V5UmVmIjp7Im5hbWUiOiJpbnRlcm5hbC1tb25pdG9yaW5nIiwia2V5IjoicG1tc2VydmVyIn19fSx7Im5hbWUiOiJQTU1fQUdFTlRfTElTVEVOX1BPUlQiLCJ2YWx1ZSI6Ijc3NzcifSx7Im5hbWUiOiJQTU1fQUdFTlRfUE9SVFNfTUlOIiwidmFsdWUiOiIzMDEwMCJ9LHsibmFtZSI6IlBNTV9BR0VOVF9QT1JUU19NQVgiLCJ2YWx1ZSI6IjMwMTA1In0seyJuYW1lIjoiUE1NX0FHRU5UX0NPTkZJR19GSUxFIiwidmFsdWUiOiIvdXNyL2xv"..., +  "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vc3NsLWhhc2giOiIzMjI4Njk1OTE4MDhjNTA4Nzk1ZDg0MGQ2OTVlMzAwMCIsInBlcmNvbmEuY29tL3NzbC1pbnRlcm5hbC1oYXNoIjoiZGU2M2FlNDY3NjcxMDA3YWM2NDRhMDI3YjhhY2U0MjAifX0sInNwZWMiOnsidm9sdW1lcyI6W3sibmFtZSI6InRtcCIsImVtcHR5RGlyIjp7fX0seyJuYW1lIjoiY29uZmlnIiwiY29uZmlnTWFwIjp7Im5hbWUiOiJtb25pdG9yaW5nLXB4YyIsIm9wdGlvbmFsIjp0cnVlfX0seyJuYW1lIjoic3NsLWludGVybmFsIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJtb25pdG9yaW5nLXNzbC1pbnRlcm5hbCIsIm9wdGlvbmFsIjp0cnVlfX0seyJuYW1lIjoic3NsIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJzb21lLW5hbWUtc3NsIiwib3B0aW9uYWwiOmZhbHNlfX0seyJuYW1lIjoiYXV0by1jb25maWciLCJjb25maWdNYXAiOnsibmFtZSI6ImF1dG8tbW9uaXRvcmluZy1weGMiLCJvcHRpb25hbCI6dHJ1ZX19LHsibmFtZSI6InZhdWx0LWtleXJpbmctc2VjcmV0Iiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJtb25pdG9yaW5nLXZhdWx0Iiwib3B0aW9uYWwiOnRydWV9fSx7Im5hbWUiOiJteXNxbC11c2Vycy1zZWNyZXQtZmlsZSIsInNlY3JldCI6eyJzZWNyZXROYW1lIjoiaW50ZXJuYWwtbW9uaXRvcmluZyIsIm9wdGlvbmFsIjpmYWxzZX19LHsibmFtZSI6Im15c3FsLWluaXQtZmlsZSIsInNlY3JldCI6eyJzZWNyZXROYW1lIjoibW9uaXRvcmluZy1teXNxbC1pbml0Iiwib3B0aW9uYWwiOnRydWV9fSx7Im5hbWUiOiJiaW4iLCJlbXB0eURpciI6e319XSwiaW5pdENvbnRhaW5lcnMiOlt7Im5hbWUiOiJweGMtaW5pdCIsImltYWdlIjoicGVyY29uYWxhYi9wZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yOlBSLTI0NjctM2RjN2YwMjMiLCJjb21tYW5kIjpbIi9weGMtaW5pdC1lbnRyeXBvaW50LnNoIl0sInJlc291cmNlcyI6eyJsaW1pdHMiOnsiY3B1IjoiNTBtIiwibWVtb3J5IjoiNTBNIn19LCJ2b2x1bWVNb3VudHMiOlt7Im5hbWUiOiJkYXRhZGlyIiwibW91bnRQYXRoIjoiL3Zhci9saWIvbXlzcWwifSx7Im5hbWUiOiJiaW4iLCJtb3VudFBhdGgiOiIvb3B0L3BlcmNvbmEifV0sImltYWdlUHVsbFBvbGljeSI6IkFsd2F5cyJ9XSwiY29udGFpbmVycyI6W3sibmFtZSI6InBtbS1jbGllbnQiLCJpbWFnZSI6InBlcmNvbmFsYWIvcG1tLWNsaWVudDpkZXYtbGF0ZXN0IiwicG9ydHMiOlt7ImNvbnRhaW5lclBvcnQiOjc3Nzd9LHsiY29udGFpbmVyUG9ydCI6MzAxMDB9LHsiY29udGFpbmVyUG9ydCI6MzAxMDF9LHsiY29udGFpbmVyUG9ydCI6MzAxMDJ9LHsiY29udGFpbmVyUG9ydCI6MzAxMDN9LHsiY29udGFpbmVyUG9ydCI6MzAxMDR9LHsiY29udGFpbmVyUG9ydCI6MzAxMDV9XSwiZW52RnJvbSI6W3sic2VjcmV0UmVmIjp7Im5hbWUiOiJteS1lbnYtdmFyLXNlY3JldHMiLCJvcHRpb25hbCI6dHJ1ZX19XSwiZW52IjpbeyJuYW1lIjoiUE1NX1NFUlZFUiIsInZhbHVlIjoibW9uaXRvcmluZy1zZXJ2aWNlIn0seyJuYW1lIjo
iUE1NX1VTRVIiLCJ2YWx1ZSI6ImFwaV9rZXkifSx7Im5hbWUiOiJQTU1fUEFTU1dPUkQiLCJ2YWx1ZUZyb20iOnsic2VjcmV0S2V5UmVmIjp7Im5hbWUiOiJpbnRlcm5hbC1tb25pdG9yaW5nIiwia2V5IjoicG1tc2VydmVya2V5In19fSx7Im5hbWUiOiJDTElFTlRfUE9SVF9MSVNURU4iLCJ2YWx1ZSI6Ijc3NzcifSx7Im5hbWUiOiJDTElFTlRfUE9SVF9NSU4iLCJ2YWx1ZSI6IjMwMTAwIn0seyJuYW1lIjoiQ0xJRU5UX1BPUlRfTUFYIiwidmFsdWUiOiIzMDEwNSJ9LHsibmFtZSI6IlBPRF9OQU1FIiwidmFsdWVGcm9tIjp7ImZpZWxkUmVmIjp7ImZpZWxkUGF0aCI6Im1ldGFkYXRhLm5hbWUifX19LHsibmFtZSI6IlBPRF9OQU1FU1BBU0UiLCJ2YWx1ZUZyb20iOnsiZmllbGRSZWYiOnsiZmllbGRQYXRoIjoibWV0YWRhdGEubmFtZXNwYWNlIn19fSx7Im5hbWUiOiJQTU1fQUdFTlRfU0VSVkVSX0FERFJFU1MiLCJ2YWx1ZSI6Im1vbml0b3Jpbmctc2VydmljZSJ9LHsibmFtZSI6IlBNTV9BR0VOVF9TRVJWRVJfVVNFUk5BTUUiLCJ2YWx1ZSI6ImFwaV9rZXkifSx7Im5hbWUiOiJQTU1fQUdFTlRfU0VSVkVSX1BBU1NXT1JEIiwidmFsdWVGcm9tIjp7InNlY3JldEtleVJlZiI6eyJuYW1lIjoiaW50ZXJuYWwtbW9uaXRvcmluZyIsImtleSI6InBtbXNlcnZlcmtleSJ9fX0seyJuYW1lIjoiUE1NX0FHRU5UX0xJU1RFTl9QT1JUIiwidmFsdWUiOiI3Nzc3In0seyJuYW1lIjoiUE1NX0FHRU5UX1BPUlRTX01JTiIsInZhbHVlIjoiMzAxMDAifSx7Im5hbWUiOiJQTU1fQUdFTlRfUE9SVFNfTUFYIiwidmFsdWUiOiIzMDEwNSJ9LHsibmFtZSI6IlBNTV9BR0VOVF9DT05GSUdfRklMRSIsInZhbHVl"..., -  "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFiYzcwNGYwMmU3NDg3"..., +  "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFiYzcwNGYwMmU3NDg3"..., +  PeriodSeconds: 0, -  PeriodSeconds: 10, +  PersistentVolumeClaimRetentionPolicy: nil, -  PersistentVolumeClaimRetentionPolicy: s"&StatefulSetPersistentVolumeClaimRetentionPolicy{WhenDeleted:Retain,WhenScaled:Retain,}", +  Phase: "", -  Phase: "Pending", +  PodManagementPolicy: "", -  
PodManagementPolicy: "OrderedReady", +  Protocol: "", -  Protocol: "TCP", +  ReadyReplicas: 0, -  ReadyReplicas: 2, -  ReadyReplicas: 3, +  Replicas: 0, +  Replicas: &0, -  Replicas: 2, -  Replicas: &2, -  Replicas: 3, -  Replicas: &3, +  ResourceVersion: "", -  ResourceVersion: "1778961530150335008", -  ResourceVersion: "1778961667501375004", -  ResourceVersion: "1778961937071343008", -  ResourceVersion: "1778962050109375004", -  ResourceVersion: "1778962122407935008", -  ResourceVersion: "1778962234300543004", -  ResourceVersion: "1778962294728367008", -  ResourceVersion: "1778962405993263004", +  RestartPolicy: "", -  RestartPolicy: "Always", -  RevisionHistoryLimit: &10, +  RevisionHistoryLimit: nil, +  SchedulerName: "", +  SchedulerName: "", -  SchedulerName: "default-scheduler", -  SchedulerName: "default-scheduler", +  Scheme: "", -  Scheme: "HTTP", +  SecurityContext: nil, -  SecurityContext: s"&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,AppArmorProfile:nil,SupplementalGroupsPolicy:nil,SELinux"..., -  Subresource: "status", +  TerminationMessagePath: "", -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePolicy: "", -  TerminationMessagePolicy: "File", -  Time: s"2026-05-16 19:57:35 +0000 UTC", -  Time: s"2026-05-16 19:58:50 +0000 UTC", -  Time: s"2026-05-16 20:01:07 +0000 UTC", -  Time: s"2026-05-16 20:04:42 +0000 UTC", -  Time: s"2026-05-16 20:05:37 +0000 UTC", -  Time: s"2026-05-16 20:07:30 +0000 UTC", -  Time: s"2026-05-16 20:07:50 +0000 UTC", -  Time: s"2026-05-16 20:08:42 +0000 UTC", -  Time: s"2026-05-16 20:10:34 +0000 UTC", -  Time: s"2026-05-16 20:10:37 +0000 UTC", -  Time: s"2026-05-16 20:10:38 +0000 UTC", -  Time: s"2026-05-16 20:11:34 +0000 UTC", -  Time: s"2026-05-16 20:13:25 +0000 UTC", -  TopologySpreadConstraints: nil, +  TopologySpreadConstraints: []v1.TopologySpreadConstraint{}, +  UID: "", -  UID: "6dd7feda-8cb0-4676-98fe-3abd9dd473e5", -  UID: "b7962863-ca6a-4538-bc7a-eee4dfc61af5", +  UpdatedReplicas: 0, -  UpdatedReplicas: 2, -  UpdatedReplicas: 3, +  UpdateRevision: "", -  UpdateRevision: "monitoring-haproxy-575bdf49d4", -  UpdateRevision: "monitoring-haproxy-5f8674d8fb", -  UpdateRevision: "monitoring-haproxy-7b8468fb86", -  UpdateRevision: "monitoring-haproxy-9bf455dbb", -  UpdateRevision: "monitoring-pxc-68d86bb54b", -  UpdateRevision: "monitoring-pxc-6b445d4f6b", -  UpdateRevision: "monitoring-pxc-6b5fc7d956", -  UpdateRevision: "monitoring-pxc-7966779fd4", -  Value: "admin", +  Value: "api_key", +  Value: "$(PMM_PREFIX)$(POD_NAMESPASE)-$(POD_NAME)", -  Value: "$(POD_NAMESPASE)-$(POD_NAME)", -  VolumeMode: &"Filesystem", +  VolumeMode: nil,   }    },    },    {    },    },    {    },    },    {    },    },    },    ... // 11 identical elements    ... // 12 identical elements    ... // 16 identical fields    ... // 16 identical fields    ... // 22 identical elements    ... // 22 identical fields    ... // 23 identical elements    ... // 25 identical elements    ... // 26 identical elements    ... // 2 identical entries    ... // 2 identical fields    ... // 2 identical fields    ... // 2 identical fields    ... // 2 identical fields    ... // 3 identical fields    ... // 3 identical fields    ... // 3 identical fields    ... // 4 identical elements    ... // 4 identical fields    ... // 4 identical fields    ... // 5 identical fields    ... // 6 identical fields    ... 
// 7 identical fields    ... // 8 identical fields    ... // 9 identical elements    ... // 9 identical fields    ... // 9 identical fields    AccessModes: nil,    ActiveDeadlineSeconds: nil,    Affinity: nil,    Affinity: nil,    Annotations: map[string]string{    Args: {"haproxy"},    Args: {"mysqld"},    Args: nil,    AutomountServiceAccountToken: nil,    AWSElasticBlockStore: nil,    AzureFile: nil,    Capacity: nil,    Conditions: nil,    ConfigMapKeyRef: nil,    ConfigMap: &v1.ConfigMapVolumeSource{    ContainerPort: 30100,    ContainerPort: 30101,    ContainerPort: 30102,    ContainerPort: 30103,    ContainerPort: 30104,    ContainerPort: 30105,    ContainerPort: 3306,    ContainerPort: 33060,    ContainerPort: 33062,    ContainerPort: 3307,    ContainerPort: 3309,    ContainerPort: 4444,    ContainerPort: 4567,    ContainerPort: 4568,    ContainerPort: 7777,    ContainerPort: 8404,    Containers: []v1.Container{    DataSource: nil,    DataSourceRef: nil,    DeletionGracePeriodSeconds: nil,    DeletionGracePeriodSeconds: nil,    DeletionTimestamp: nil,    EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}},    Env: {{Name: "PXC_SERVICE", Value: "monitoring-pxc"}, {Name: "LIVENESS_CHECK_TIMEOUT", Value: "5"}, {Name: "READINESS_CHECK_TIMEOUT", Value: "1"}},    Env: {{Name: "PXC_SERVICE", Value: "monitoring-pxc-unready"}, {Name: "MONITOR_HOST", Value: "%"}, {Name: "MYSQL_ROOT_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-monitoring"}, Key: "root"}}}, {Name: "XTRABACKUP_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-monitoring"}, Key: "xtrabackup"}}}, ...},    Env: []v1.EnvVar{    EphemeralContainers: nil,    Exec: nil,    FailureThreshold: 3,    FC: nil,    FieldPath: "metadata.name",    FieldPath: "metadata.namespace",    FieldRef: nil,    FieldRef: &v1.ObjectFieldSelector{    FileKeyRef: nil,    Finalizers: nil,    Finalizers: nil,    GitRepo: nil,    GRPC: nil,    Host: "",    HostAliases: nil,    HostAliases: nil,    HostIP: "",    HostIPC: false,    Hostname: "",    HostPort: 0,    HTTPGet: &v1.HTTPGetAction{    HTTPHeaders: nil,    ImagePullPolicy: "Always",    ImagePullSecrets: nil,    InitContainers: []v1.Container{    InitialDelaySeconds: 15,    InitialDelaySeconds: 300,    InitialDelaySeconds: 300,    ISCSI: nil,    Items: nil,    Items: nil,    "kubectl.kubernetes.io/default-container": "haproxy",    "kubectl.kubernetes.io/default-container": "pxc",    Labels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...},    Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...},    Labels: nil,    Lifecycle: nil,    Lifecycle: &{PreStop: &{Exec: &{Command: {"bash", "-c", "pmm-admin unregister --force"}}}},    LivenessProbe: &v1.Probe{    LocalObjectReference: {Name: "auto-monitoring-pxc"},    LocalObjectReference: {Name: "internal-monitoring"},    LocalObjectReference: {Name: "monitoring-haproxy"},    LocalObjectReference: {Name: "monitoring-pxc"},    ManagedFields: nil,    MinReadySeconds: 0,    Name: "",    Name: "auto-config",    {Name: "bin", VolumeSource: {EmptyDir: &{}}},    {Name: "CLIENT_PORT_LISTEN", Value: "7777"},    {Name: "CLIENT_PORT_MAX", Value: 
"30105"},    {Name: "CLIENT_PORT_MIN", Value: "30100"},    Name: "config",    {Name: "DB_TYPE", Value: "haproxy"},    {Name: "DB_TYPE", Value: "mysql"},    {Name: "DB_USER", Value: "monitor"},    {Name: "haproxy-auto", VolumeSource: {EmptyDir: &{}}},    Name: "haproxy-custom",    Name: "ist",    {Name: "MONITOR_USER", Value: "monitor"},    Name: "my-env-var-secrets",    Name: "mysql",    Name: "mysql-admin",    Name: "mysql-init-file",    Name: "mysql-replicas",    Name: "mysql-users-secret-file",    Name: "mysqlx",    {Name: "PMM_AGENT_LISTEN_PORT", Value: "7777"},    {Name: "PMM_AGENT_PORTS_MIN", Value: "30100"},    {Name: "PMM_AGENT_SERVER_ADDRESS", Value: "monitoring-service"},    Name: "PMM_AGENT_SERVER_PASSWORD",    Name: "PMM_AGENT_SERVER_USERNAME",    {Name: "PMM_AGENT_SERVER_USERNAME", Value: "api_key"},    {Name: "PMM_AGENT_SETUP_FORCE", Value: "1"},    Name: "PMM_AGENT_SETUP_NODE_NAME",    {Name: "PMM_AGENT_SETUP_NODE_TYPE", Value: "container"},    Name: "PMM_PASSWORD",    {Name: "PMM_SERVER", Value: "monitoring-service"},    Name: "PMM_USER",    Name: "POD_NAME",    Name: "POD_NAMESPASE",    Name: "proxy-protocol",    Namespace: "monitoring-2-0-25359",    Name: "ssl",    Name: "ssl-internal",    Name: "sst",    Name: "stats",    {Name: "tmp", VolumeSource: {EmptyDir: &{}}},    Name: "vault-keyring-secret",    Name: "write-set",    NFS: nil,    NodeName: "",    NodeSelector: nil,    ObjectMeta: {Labels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: {"kubectl.kubernetes.io/default-container": "haproxy", "last-applied-secret": "b4a0fd68b555a39e346f3bc93f601af73161a54d65f11198aa59e11d0dda74f4", "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e", "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0"}},    ObjectMeta: {Labels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: {"kubectl.kubernetes.io/default-container": "haproxy", "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e"}},    ObjectMeta: {Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: {"kubectl.kubernetes.io/default-container": "pxc", "last-applied-secret": "b4a0fd68b555a39e346f3bc93f601af73161a54d65f11198aa59e11d0dda74f4", "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e", "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0", ...}},    ObjectMeta: {Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: {"kubectl.kubernetes.io/default-container": "pxc", "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e", "percona.com/ssl-hash": "322869591808c508795d840d695e3000", "percona.com/ssl-internal-hash": "de63ae467671007ac644a027b8ace420"}},    ObjectMeta: {Name: "datadir", Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": 
"percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}},    ObjectMeta: v1.ObjectMeta{    ObjectMeta: v1.ObjectMeta{    Optional: &false,    Optional: nil,    Optional: &true,    Optional: &true,    Ordinals: nil,    OS: nil,    Overhead: nil,    OwnerReferences: {{APIVersion: "pxc.percona.com/v1", Kind: "PerconaXtraDBCluster", Name: "monitoring", UID: "7ebb7d89-573c-45b6-88f2-bcc22a812746", ...}},    OwnerReferences: nil,    Path: "/local/Status",    "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e",    "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0",    "percona.com/ssl-hash": "322869591808c508795d840d695e3000",    "percona.com/ssl-internal-hash": "de63ae467671007ac644a027b8ace420",    Port: {IntVal: 7777},    Ports: []v1.ContainerPort{    PreemptionPolicy: nil,    ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/liveness-check.sh"}}},    ProbeHandler: v1.ProbeHandler{    Quobyte: nil,    ReadinessProbe: &{ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/readiness-check.sh"}}}, InitialDelaySeconds: 15, TimeoutSeconds: 15, PeriodSeconds: 30, ...},    ReadinessProbe: &v1.Probe{    Replicas: &2,    Replicas: &3,    ResizePolicy: nil,    ResourceFieldRef: nil,    Resources: {Limits: {s"cpu": {i: {...}, Format: "DecimalSI"}, s"memory": {i: {...}, Format: "DecimalSI"}}, Requests: {s"cpu": {i: {...}, s: "308m", Format: "DecimalSI"}, s"memory": {i: {...}, s: "508M", Format: "DecimalSI"}}},    SecretKeyRef: &v1.SecretKeySelector{    SecretName: "internal-monitoring",    SecretName: "monitoring-mysql-init",    SecretName: "monitoring-ssl-internal",    SecretName: "monitoring-vault",    SecretName: "my-env-var-secrets",    SecretName: "some-name-ssl",    Secret: &v1.SecretVolumeSource{    SecurityContext: nil,    Selector: &{MatchLabels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}},    Selector: &{MatchLabels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}},    SelfLink: "",    ServiceAccountName: "default",    ServiceName: "monitoring-haproxy",    ServiceName: "monitoring-pxc",    SetHostnameAsFQDN: nil,    ShareProcessNamespace: nil,    Spec: v1.PersistentVolumeClaimSpec{    Spec: v1.PodSpec{    Spec: v1.StatefulSetSpec{    StartupProbe: nil,    Status: v1.PersistentVolumeClaimStatus{    Status: v1.StatefulSetStatus{    StorageClassName: nil,    Subdomain: "",    Subdomain: "",    SuccessThreshold: 1,    TCPSocket: nil,    Template: v1.PodTemplateSpec{    TerminationGracePeriodSeconds: &30,    TerminationGracePeriodSeconds: &600,    TerminationGracePeriodSeconds: nil,    TimeoutSeconds: 15,    TimeoutSeconds: 5,    TimeoutSeconds: 5,    Tolerations: nil,    Tolerations: nil,    TypeMeta: {},    TypeMeta: {Kind: "StatefulSet", APIVersion: "apps/v1"},    UpdateStrategy: {Type: "RollingUpdate", RollingUpdate: &{Partition: &0}},   &v1.StatefulSet{    Value: "",    ValueFrom: nil,    ValueFrom: &v1.EnvVarSource{    VolumeAttributesClassName: nil,    VolumeClaimTemplates: nil,    VolumeClaimTemplates: []v1.PersistentVolumeClaim{    VolumeDevices: nil,    VolumeMounts: {{Name: "bin", MountPath: "/var/lib/mysql"}},    VolumeMounts: {{Name: "datadir", MountPath: "/var/lib/mysql"}},    VolumeMounts: 
{{Name: "datadir", MountPath: "/var/lib/mysql"}, {Name: "config", MountPath: "/etc/percona-xtradb-cluster.conf.d"}, {Name: "tmp", MountPath: "/tmp"}, {Name: "ssl", MountPath: "/etc/mysql/ssl"}, ...},    VolumeName: "",    VolumeSource: v1.VolumeSource{    Volumes: []v1.Volume{    VsphereVolume: nil,    WorkingDir: "", + kubectl get pxc --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl patch pxc -n monitoring-2-0-25359 monitoring --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/monitoring patched (no change) + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.mqsm0pvbat ++ mktemp + local LAST_ERR=/tmp/tmp.R1HXU3Gal6 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.mqsm0pvbat perconaxtradbcluster.pxc.percona.com "monitoring" deleted from monitoring-2-0-25359 namespace + cat /tmp/tmp.R1HXU3Gal6 + rm /tmp/tmp.mqsm0pvbat /tmp/tmp.R1HXU3Gal6 + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.3G4uXBUWXP ++ mktemp + local LAST_ERR=/tmp/tmp.0l2p2QDO1P + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.3G4uXBUWXP No resources found + cat /tmp/tmp.0l2p2QDO1P + rm /tmp/tmp.3G4uXBUWXP /tmp/tmp.0l2p2QDO1P + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.rlE6xwaGTA ++ mktemp + local LAST_ERR=/tmp/tmp.O5UAxhWJqm + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.rlE6xwaGTA No resources found + cat /tmp/tmp.O5UAxhWJqm + rm /tmp/tmp.rlE6xwaGTA /tmp/tmp.O5UAxhWJqm + return 0 + kubectl_bin delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook ++ mktemp + local LAST_OUT=/tmp/tmp.3vPJveYh4v ++ mktemp + local LAST_ERR=/tmp/tmp.v3xHTSVUK4 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.3vPJveYh4v validatingwebhookconfiguration.admissionregistration.k8s.io "percona-xtradbcluster-webhook" deleted + cat /tmp/tmp.v3xHTSVUK4 + rm /tmp/tmp.3vPJveYh4v /tmp/tmp.v3xHTSVUK4 + return 0 + kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v1.19.2/cert-manager.yaml + : + '[' '!' 
-z '' ']' + '[' -n pxc-operator ']' + rm -rf /tmp/tmp.7PFIFwbwjt + kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator + kubectl_bin delete --grace-period=0 --force=true namespace monitoring-2-0-25359 ++ mktemp + local LAST_OUT=/tmp/tmp.yAbmjUARgF ++ mktemp + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_ERR=/tmp/tmp.ucme63zLQA + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.I4u4ufGQB1 ++ mktemp + local LAST_ERR=/tmp/tmp.tZ748f4IJW + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace monitoring-2-0-25359 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.yAbmjUARgF namespace "pxc-operator" force deleted + cat /tmp/tmp.ucme63zLQA Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. + rm /tmp/tmp.yAbmjUARgF /tmp/tmp.ucme63zLQA + return 0
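Note: every kubectl_bin call in this trace expands to the same retry pattern seen above — run the command up to three times, capture stdout and stderr into temp files, and print both before returning. A minimal sketch of that pattern, reconstructed from the trace (the function name and the back-off are illustrative, not the harness's exact source):

#!/usr/bin/env bash
# Sketch of the retry wrapper seen in the expanded trace: run a kubectl
# command up to 3 times, capturing stdout/stderr to temp files.
kubectl_retry() {
    local out err status=1
    out=$(mktemp)
    err=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$out" 2>"$err"
        status=$?
        set -e
        [ "$status" -eq 0 ] && break
        sleep 1   # illustrative back-off; the real harness may differ
    done
    cat "$out"
    cat "$err" >&2
    rm -f "$out" "$err"
    return "$status"
}
# Example, matching a call retried in the trace above:
# kubectl_retry delete pxc --all --all-namespaces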
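The pmmserverkey rotation logged above is propagated by stamping a last-applied-secret hash into the pod template annotations of both StatefulSets (visible in the diff), which is what produces the "HAProxy pods will be restarted" / "PXC pods will be restarted" messages and the subsequent rolling updates. One way to confirm a rollout was driven by that secret change, assuming the same namespace and object names as this run:

# Print the secret hash the operator stamped on each StatefulSet's pod template,
# then wait for the corresponding rolling restart to finish.
for sts in monitoring-pxc monitoring-haproxy; do
  kubectl -n monitoring-2-0-25359 get sts "$sts" \
    -o jsonpath='{.spec.template.metadata.annotations.last-applied-secret}{"\n"}'
  kubectl -n monitoring-2-0-25359 rollout status "sts/$sts" --timeout=10m
done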
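For reference, the teardown traced at the end of this log reduces to: strip finalizers from every PerconaXtraDBCluster so deletion cannot hang, delete the pxc custom resources and their backups/restores, remove the validating webhook and cert-manager, and force-delete the test namespaces. A condensed sketch using the same names as this run:

#!/usr/bin/env bash
set -e

# Remove finalizers so the pxc custom resources can be deleted without hanging.
kubectl get pxc --all-namespaces -o wide | grep -v NAMESPACE \
  | xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"'

# Delete the operator-managed custom resources everywhere.
kubectl delete pxc --all --all-namespaces
kubectl delete pxc-backup --all --all-namespaces
kubectl delete pxc-restore --all --all-namespaces

# Drop the admission webhook and cert-manager (best effort, as in the trace).
kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook
kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/v1.19.2/cert-manager.yaml || true

# Force-delete the test namespaces.
kubectl delete --grace-period=0 --force=true namespace pxc-operator
kubectl delete --grace-period=0 --force=true namespace monitoring-2-0-25359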