Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/logs/monitoring-pmm3-8-4.log Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 + cluster=monitoring + create_infra monitoring-pmm3-14440 + local ns=monitoring-pmm3-14440 + '[' -n pxc-operator ']' + kubectl get pxc --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + grep -v NAMESPACE + kubectl patch pxc -n monitoring-pmm3-6606 monitoring --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/monitoring patched (no change) + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.NNjF3hxbvf ++ mktemp + local LAST_ERR=/tmp/tmp.X0yEYuU0Lo + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.NNjF3hxbvf perconaxtradbcluster.pxc.percona.com "monitoring" deleted from monitoring-pmm3-6606 namespace + cat /tmp/tmp.X0yEYuU0Lo + rm /tmp/tmp.NNjF3hxbvf /tmp/tmp.X0yEYuU0Lo + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.eUbp7Jug1Z ++ mktemp + local LAST_ERR=/tmp/tmp.vYjaXqalao + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.eUbp7Jug1Z No resources found + cat /tmp/tmp.vYjaXqalao + rm /tmp/tmp.eUbp7Jug1Z /tmp/tmp.vYjaXqalao + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.HilQHPC4NY ++ mktemp + local LAST_ERR=/tmp/tmp.Wpaf2b2w6s + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.HilQHPC4NY No resources found + cat /tmp/tmp.Wpaf2b2w6s + rm /tmp/tmp.HilQHPC4NY /tmp/tmp.Wpaf2b2w6s + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ sed s/NAMESPACE// ++ tail -n1 ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk 
'{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.UYmx9TaW3D + xargs kubectl delete ns ++ mktemp + awk '{print$1}' + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.o0gVvuCCIL + local LAST_ERR=/tmp/tmp.FXmhfeqI7M + local exit_status=0 ++ mktemp + local LAST_ERR=/tmp/tmp.8BXx514Sqx + local exit_status=0 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.o0gVvuCCIL + cat /tmp/tmp.8BXx514Sqx + rm /tmp/tmp.o0gVvuCCIL /tmp/tmp.8BXx514Sqx + return 0 namespace "monitoring-pmm3-6606" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.UYmx9TaW3D namespace "pxc-operator" deleted + cat /tmp/tmp.FXmhfeqI7M + rm /tmp/tmp.UYmx9TaW3D /tmp/tmp.FXmhfeqI7M + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.Ud1HYDq4Uh ++ mktemp + local LAST_ERR=/tmp/tmp.8tgyl5Lshg + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Ud1HYDq4Uh namespace/pxc-operator created + cat /tmp/tmp.8tgyl5Lshg + rm /tmp/tmp.Ud1HYDq4Uh /tmp/tmp.8tgyl5Lshg + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.2X5969Cui0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.AfdjeYh24W ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.2X5969Cui0 ++ cat /tmp/tmp.AfdjeYh24W ++ rm /tmp/tmp.2X5969Cui0 /tmp/tmp.AfdjeYh24W ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster5 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.yuRQH5WUuD ++ mktemp + local LAST_ERR=/tmp/tmp.sF0muWdfml + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster5 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.yuRQH5WUuD Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster5" 
modified. + cat /tmp/tmp.sF0muWdfml + rm /tmp/tmp.yuRQH5WUuD /tmp/tmp.sF0muWdfml + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.BqAyeYUkZi ++ mktemp + local LAST_ERR=/tmp/tmp.dcUFHVCYUp + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.BqAyeYUkZi customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.dcUFHVCYUp + rm /tmp/tmp.BqAyeYUkZi /tmp/tmp.dcUFHVCYUp + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/deploy/cw-rbac.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.4eqw211V6J ++ mktemp + local LAST_ERR=/tmp/tmp.wuE5dWg2o1 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4eqw211V6J clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.wuE5dWg2o1 + rm /tmp/tmp.4eqw211V6J /tmp/tmp.wuE5dWg2o1 + return 0 + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/deploy/cw-operator.yaml + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2384-7f4bfbf4^' + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "PXCO_FEATURE_GATES").value) = ""' - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.x7b42lmWPN ++ mktemp + local LAST_ERR=/tmp/tmp.PoMYp4jnIq + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.x7b42lmWPN deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.PoMYp4jnIq + rm /tmp/tmp.x7b42lmWPN /tmp/tmp.PoMYp4jnIq + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ 
mktemp + local LAST_OUT=/tmp/tmp.v7pHLeSnm4 ++ mktemp + local LAST_ERR=/tmp/tmp.qb1AIeKef5 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.v7pHLeSnm4 pod/percona-xtradb-cluster-operator-944bd69c8-lqt9v condition met + cat /tmp/tmp.qb1AIeKef5 + rm /tmp/tmp.v7pHLeSnm4 /tmp/tmp.qb1AIeKef5 + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ head -1 ++ jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name' +++ mktemp ++ local LAST_OUT=/tmp/tmp.gInNihmGTI +++ mktemp ++ local LAST_ERR=/tmp/tmp.08D5FPDDN2 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.gInNihmGTI ++ cat /tmp/tmp.08D5FPDDN2 ++ rm /tmp/tmp.gInNihmGTI /tmp/tmp.08D5FPDDN2 ++ return 0 + wait_pod percona-xtradb-cluster-operator-944bd69c8-lqt9v 480 pxc-operator + local pod=percona-xtradb-cluster-operator-944bd69c8-lqt9v + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-944bd69c8-lqt9v ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-944bd69c8-lqt9v condition met waiting for pod/percona-xtradb-cluster-operator-944bd69c8-lqt9v to become Ready.Ok + sleep 3 + create_namespace monitoring-pmm3-14440 + local namespace=monitoring-pmm3-14440 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were 
provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces monitoring-pmm3-14440' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces monitoring-pmm3-14440 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace monitoring-pmm3-14440 + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.jqsajcsoRm + local LAST_OUT=/tmp/tmp.6jT5nzGaIj ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.4OiQK8jgUb + local exit_status=0 + local LAST_ERR=/tmp/tmp.ufw9BhKs0s + local exit_status=0 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-pmm3-14440 + awk '{print$1}' + xargs kubectl delete ns + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.jqsajcsoRm + cat /tmp/tmp.4OiQK8jgUb + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + rm /tmp/tmp.jqsajcsoRm /tmp/tmp.4OiQK8jgUb + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-pmm3-14440 + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-pmm3-14440 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.6jT5nzGaIj + cat /tmp/tmp.ufw9BhKs0s Error from server (NotFound): namespaces "monitoring-pmm3-14440" not found + rm /tmp/tmp.6jT5nzGaIj /tmp/tmp.ufw9BhKs0s + return 1 + : + wait_for_delete namespace/monitoring-pmm3-14440 + local res=namespace/monitoring-pmm3-14440 + echo -n 'waiting for namespace/monitoring-pmm3-14440 to be deleted' waiting for namespace/monitoring-pmm3-14440 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "monitoring-pmm3-14440" not found + desc 'create namespace monitoring-pmm3-14440' + set +o xtrace ----------------------------------------------------------------------------------- create namespace monitoring-pmm3-14440 ----------------------------------------------------------------------------------- + kubectl_bin create namespace monitoring-pmm3-14440 ++ mktemp + local LAST_OUT=/tmp/tmp.9YmfL3z0O0 ++ mktemp + local LAST_ERR=/tmp/tmp.5pzPIc7lYh + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace monitoring-pmm3-14440 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.9YmfL3z0O0 namespace/monitoring-pmm3-14440 created + cat /tmp/tmp.5pzPIc7lYh + rm /tmp/tmp.9YmfL3z0O0 /tmp/tmp.5pzPIc7lYh + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.Uw5VIDGxZh +++ mktemp ++ local LAST_ERR=/tmp/tmp.KQaJIu7oLv ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Uw5VIDGxZh ++ cat /tmp/tmp.KQaJIu7oLv ++ rm /tmp/tmp.Uw5VIDGxZh /tmp/tmp.KQaJIu7oLv ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster5 
--namespace=monitoring-pmm3-14440 ++ mktemp + local LAST_OUT=/tmp/tmp.MpINFkzKFF ++ mktemp + local LAST_ERR=/tmp/tmp.8QfMrsUuCj + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster5 --namespace=monitoring-pmm3-14440 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.MpINFkzKFF Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster5" modified. + cat /tmp/tmp.8QfMrsUuCj + rm /tmp/tmp.MpINFkzKFF /tmp/tmp.8QfMrsUuCj + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.6GkT6cD5zy ++ mktemp + local LAST_ERR=/tmp/tmp.L5SfoBsPmX + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.6GkT6cD5zy secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.L5SfoBsPmX + rm /tmp/tmp.6GkT6cD5zy /tmp/tmp.L5SfoBsPmX + return 0 + deploy_helm monitoring-pmm3-14440 + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" already exists with the same configuration, skipping + helm repo add minio https://charts.min.io/ "minio" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "chaos-mesh" chart repository ...Successfully got an update from the "hashicorp" chart repository ...Successfully got an update from the "percona" chart repository Update Complete. ⎈Happy Helming!⎈ + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + deploy_pmm3_server + helm uninstall -n '' monitoring Error: uninstall: Release not loaded: monitoring: release: not found + : + helm repo remove percona "percona" has been removed from your repositories + kubectl delete clusterrole monitoring --ignore-not-found + kubectl delete clusterrolebinding monitoring --ignore-not-found + helm repo add percona https://percona.github.io/percona-helm-charts/ "percona" has been added to your repositories + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "chaos-mesh" chart repository ...Successfully got an update from the "hashicorp" chart repository ...Successfully got an update from the "percona" chart repository Update Complete. ⎈Happy Helming!⎈ + '[' '!' 
-z '' ']' + retry 10 60 helm install monitoring percona/pmm -n '' --set fullnameOverride=monitoring --set image.tag=3-dev-latest --set image.repository=perconalab/pmm-server --set service.type=LoadBalancer --force + local max=10 + local delay=60 + shift 2 + local n=1 + helm install monitoring percona/pmm -n '' --set fullnameOverride=monitoring --set image.tag=3-dev-latest --set image.repository=perconalab/pmm-server --set service.type=LoadBalancer --force NAME: monitoring LAST DEPLOYED: Wed Mar 11 09:22:05 2026 NAMESPACE: monitoring-pmm3-14440 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: Percona Monitoring and Management (PMM) An open source database monitoring, observability and management tool Check more info here: https://docs.percona.com/percona-monitoring-and-management/index.html Get the application URL: NOTE: It may take a few minutes for the LoadBalancer IP to be available. You can watch the status of by running 'kubectl get --namespace monitoring-pmm3-14440 svc -w monitoring-service' export SERVICE_IP=$(kubectl get svc --namespace monitoring-pmm3-14440 monitoring-service -o jsonpath="{.status.loadBalancer.ingress[0].ip}") echo https://$SERVICE_IP: Get password for the "admin" user: export ADMIN_PASS=$(kubectl get secret pmm-secret --namespace monitoring-pmm3-14440 -o jsonpath='{.data.PMM_ADMIN_PASSWORD}' | base64 --decode) echo $ADMIN_PASS + wait_for_pmm_service + timeout=420 ++ date +%s + start=1773220927 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1773220928 - start >= timeout )) + sleep 2 + grep -q . + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' ++ date +%s + (( 1773220931 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1773220934 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1773220937 - start >= timeout )) + sleep 2 + grep -q . + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' ++ date +%s + (( 1773220940 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1773220943 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1773220946 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1773220949 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1773220952 - start >= timeout )) + sleep 2 + grep -q . + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' ++ date +%s + (( 1773220956 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1773220959 - start >= timeout )) + sleep 2 + grep -q . 
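The polling in progress here is the wait_for_pmm_service step: it re-reads the LoadBalancer ingress field every two seconds until the field is non-empty or a 420-second budget runs out. A minimal sketch of that loop as reconstructed from the trace (plain kubectl stands in for the suite's kubectl_bin wrapper):

# Wait up to 420s for monitoring-service to get a LoadBalancer ingress.
# jsonpath prints nothing until .status.loadBalancer.ingress[0] exists,
# so `grep -q .` succeeds only once an IP or hostname is assigned.
wait_for_pmm_service() {
    local timeout=420 start
    start=$(date +%s)
    until kubectl get svc monitoring-service \
        -o 'jsonpath={.status.loadBalancer.ingress[0]}' | grep -q .; do
        (( $(date +%s) - start >= timeout )) && {
            echo 'timed out waiting for monitoring-service ingress' >&2
            return 1
        }
        sleep 2
    done
}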
+ kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + kubectl_bin wait sts/monitoring '--for=jsonpath={.status.readyReplicas}=1' --timeout=420s ++ mktemp + local LAST_OUT=/tmp/tmp.uINJAJHtq9 ++ mktemp + local LAST_ERR=/tmp/tmp.P1TDED2GHB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait sts/monitoring '--for=jsonpath={.status.readyReplicas}=1' --timeout=420s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.uINJAJHtq9 statefulset.apps/monitoring condition met + cat /tmp/tmp.P1TDED2GHB + rm /tmp/tmp.uINJAJHtq9 /tmp/tmp.P1TDED2GHB + return 0 + desc 'create secret' + set +o xtrace ----------------------------------------------------------------------------------- create secret ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/secrets.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.d2cCbdT7eg ++ mktemp + local LAST_ERR=/tmp/tmp.bdTaGZskrB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/secrets.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.d2cCbdT7eg secret/my-cluster-secrets created + cat /tmp/tmp.bdTaGZskrB + rm /tmp/tmp.d2cCbdT7eg /tmp/tmp.bdTaGZskrB + return 0 + desc 'add PMM3 token to secret' + set +o xtrace ----------------------------------------------------------------------------------- add PMM3 token to secret ----------------------------------------------------------------------------------- ++ get_pmm_server_token operator ++ local key_name=operator ++ [[ -z operator ]] ++ local ADMIN_PASSWORD +++ kubectl get secret pmm-secret -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' +++ base64 --decode ++ ADMIN_PASSWORD='r]vc,*8"MaA[R#j;' ++ [[ -z r]vc,*8"MaA[R#j; ]] ++ local create_response create_status_code create_json_response ++ local retry=0 ++ [[ '' == 201 ]] ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.VZcCrmDF46 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.Mm9pnPtUHr +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.VZcCrmDF46 +++++ cat /tmp/tmp.Mm9pnPtUHr +++++ rm /tmp/tmp.VZcCrmDF46 /tmp/tmp.Mm9pnPtUHr +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ApVNgyNABE +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.O780tW24KM ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.ApVNgyNABE ++++ cat /tmp/tmp.O780tW24KM ++++ rm /tmp/tmp.ApVNgyNABE /tmp/tmp.O780tW24KM ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 
'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.0X40P97cXh +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.H5yKi478t8 ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.0X40P97cXh ++++ cat /tmp/tmp.H5yKi478t8 ++++ rm /tmp/tmp.0X40P97cXh /tmp/tmp.H5yKi478t8 ++++ return 0 +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -d '{"name":"operator", "role":"Admin", "isDisabled":false}' --user 'admin:r]vc,*8"MaA[R#j;' https://34.122.19.231/graph/api/serviceaccounts -w '\n%{http_code}' ++ create_response='{"id":2,"uid":"effok7f256igwe","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ echo '{"id":2,"uid":"effok7f256igwe","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ tail -n1 ++ create_status_code=201 +++ echo '{"id":2,"uid":"effok7f256igwe","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ sed '$ d' ++ create_json_response='{"id":2,"uid":"effok7f256igwe","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' ++ sleep 5 ++ let retry+=1 ++ '[' 1 -ge 24 ']' ++ [[ 201 == 201 ]] ++ local service_account_id +++ echo '{"id":2,"uid":"effok7f256igwe","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' +++ jq -r .id ++ service_account_id=2 ++ [[ -z 2 ]] ++ [[ 2 == \n\u\l\l ]] ++ local token_response token_status_code token_json_response ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.9SGX9lih9N ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.yq94nildeO +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.9SGX9lih9N +++++ cat /tmp/tmp.yq94nildeO +++++ rm /tmp/tmp.9SGX9lih9N /tmp/tmp.yq94nildeO +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.NgiStfAEys +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.dx3oYS8o4n ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.NgiStfAEys ++++ cat /tmp/tmp.dx3oYS8o4n ++++ rm /tmp/tmp.NgiStfAEys /tmp/tmp.dx3oYS8o4n ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.DU5NwYo1XW +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.8YJC0eOPYT ++++ local exit_status=0 +++++ seq 0 2 
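Underneath the temp-file plumbing, get_pmm_server_token is two calls against the PMM server's Grafana API followed by a secret patch. A condensed sketch of that flow, reconstructed from the commands visible in this trace (retry loops omitted; SERVICE_IP stands for the LoadBalancer address that get_service_ip resolves):

# 1. Read the admin password PMM generated at install time.
ADMIN_PASS=$(kubectl get secret pmm-secret \
    -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' | base64 --decode)

# 2. Create an Admin-role service account (the trace expects HTTP 201)
#    and keep its numeric id.
SA_ID=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
    -d '{"name":"operator","role":"Admin","isDisabled":false}' \
    --user "admin:${ADMIN_PASS}" \
    "https://${SERVICE_IP}/graph/api/serviceaccounts" | jq -r .id)

# 3. Mint a token for that service account and extract its key.
TOKEN=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
    -d '{"name":"operator"}' \
    --user "admin:${ADMIN_PASS}" \
    "https://${SERVICE_IP}/graph/api/serviceaccounts/${SA_ID}/tokens" | jq -r .key)

# 4. Hand the token to the operator through the cluster secret.
kubectl patch secret my-cluster-secrets --type merge \
    --patch "{\"stringData\": {\"pmmservertoken\": \"${TOKEN}\"}}"

The trace's own error handling (status-code checks against 201/200 and up to 24 retries) wraps each call; only the happy path is shown here.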
++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.DU5NwYo1XW ++++ cat /tmp/tmp.8YJC0eOPYT ++++ rm /tmp/tmp.DU5NwYo1XW /tmp/tmp.8YJC0eOPYT ++++ return 0 +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -d '{"name":"operator"}' --user 'admin:r]vc,*8"MaA[R#j;' https://34.122.19.231/graph/api/serviceaccounts/2/tokens -w '\n%{http_code}' ++ token_response='{"id":1,"name":"operator","key":"glsa_UlVvOez4gX9yPN1DTfsnB10KqcYpma9U_2d41c9b6"} 200' +++ echo '{"id":1,"name":"operator","key":"glsa_UlVvOez4gX9yPN1DTfsnB10KqcYpma9U_2d41c9b6"} 200' +++ tail -n1 ++ token_status_code=200 +++ echo '{"id":1,"name":"operator","key":"glsa_UlVvOez4gX9yPN1DTfsnB10KqcYpma9U_2d41c9b6"} 200' +++ sed '$ d' ++ token_json_response='{"id":1,"name":"operator","key":"glsa_UlVvOez4gX9yPN1DTfsnB10KqcYpma9U_2d41c9b6"}' ++ [[ 200 -ne 200 ]] ++ jq -r .key ++ echo '{"id":1,"name":"operator","key":"glsa_UlVvOez4gX9yPN1DTfsnB10KqcYpma9U_2d41c9b6"}' + TOKEN=glsa_UlVvOez4gX9yPN1DTfsnB10KqcYpma9U_2d41c9b6 + kubectl_bin patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmservertoken": "glsa_UlVvOez4gX9yPN1DTfsnB10KqcYpma9U_2d41c9b6"}}' ++ mktemp + local LAST_OUT=/tmp/tmp.ZfvHKfyHKj ++ mktemp + local LAST_ERR=/tmp/tmp.1BmFLynyWQ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmservertoken": "glsa_UlVvOez4gX9yPN1DTfsnB10KqcYpma9U_2d41c9b6"}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ZfvHKfyHKj secret/my-cluster-secrets patched + cat /tmp/tmp.1BmFLynyWQ + rm /tmp/tmp.ZfvHKfyHKj /tmp/tmp.1BmFLynyWQ + return 0 + desc 'create PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PXC cluster ----------------------------------------------------------------------------------- + spinup_pxc monitoring /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/monitoring.yml 3 120 + local cluster=monitoring + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/monitoring.yml + local size=3 + local sleep=120 + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/client.yml + local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/client.yml + local pvc_name= + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/client.yml '' + local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/client.yml + local pvc_name= + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: 
perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.4-backup#' ++ mktemp + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.4#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.monitoring-pmm3-14440~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2384-7f4bfbf4#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.4#' + local LAST_OUT=/tmp/tmp.0k0yXAcVux ++ mktemp + local LAST_ERR=/tmp/tmp.XMufsEgLAT + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.0k0yXAcVux deployment.apps/pxc-client created + cat /tmp/tmp.XMufsEgLAT + rm /tmp/tmp.0k0yXAcVux /tmp/tmp.XMufsEgLAT + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.4 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/monitoring.yml + local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/monitoring.yml + local pvc_name= + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/monitoring.yml '' + local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/monitoring.yml + local pvc_name= + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.jksUfg7F7t + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.4#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.4-backup#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.monitoring-pmm3-14440~ + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2384-7f4bfbf4#' ++ mktemp + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' + local LAST_ERR=/tmp/tmp.n6hgCxccb0 + local exit_status=0 + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/monitoring.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' ++ seq 0 2 + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.4#' + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.jksUfg7F7t perconaxtradbcluster.pxc.percona.com/monitoring created + cat /tmp/tmp.n6hgCxccb0 + rm /tmp/tmp.jksUfg7F7t /tmp/tmp.n6hgCxccb0 + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 
Pods started ----------------------------------------------------------------------------------- ++ get_proxy monitoring ++ local target_cluster=monitoring +++ kubectl_bin get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Ldakg0qGAi ++++ mktemp +++ local LAST_ERR=/tmp/tmp.WXD7MlxpMI +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.Ldakg0qGAi +++ cat /tmp/tmp.WXD7MlxpMI +++ rm /tmp/tmp.Ldakg0qGAi /tmp/tmp.WXD7MlxpMI +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo monitoring-haproxy ++ return + local proxy=monitoring-haproxy + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-pmm3-14440 ++ mktemp + local LAST_OUT=/tmp/tmp.5OEUY5NtOe ++ mktemp + local LAST_ERR=/tmp/tmp.jW1fTOYk71 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-pmm3-14440 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-pmm3-14440 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.5OEUY5NtOe pod/monitoring-haproxy-0 condition met pod/monitoring-pxc-0 condition met + cat /tmp/tmp.jW1fTOYk71 + rm /tmp/tmp.5OEUY5NtOe /tmp/tmp.jW1fTOYk71 + return 0 + wait_for_running monitoring-haproxy 1 + local name=monitoring-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-haproxy-0 480 + local pod=monitoring-haproxy-0 + local max_retry=480 + local ns= ++ echo monitoring-haproxy-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/monitoring-haproxy-0 condition met waiting for pod/monitoring-haproxy-0 to become Ready.Ok + wait_for_running monitoring-pxc 3 + local name=monitoring-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-0 480 + local pod=monitoring-pxc-0 + local max_retry=480 + local ns= ++ echo monitoring-pxc-0 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/monitoring-pxc-0 condition met waiting for pod/monitoring-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-1 480 + local pod=monitoring-pxc-1 + local max_retry=480 + local ns= ++ echo monitoring-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/monitoring-pxc-1 
condition met waiting for pod/monitoring-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-2 480 + local pod=monitoring-pxc-2 + local max_retry=480 + local ns= ++ echo monitoring-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/monitoring-pxc-2 condition met waiting for pod/monitoring-pxc-2 to become Ready.Ok + sleep 120 ++ kubectl get pxc monitoring -o 'jsonpath={.spec.secretsName}' + local secret_name=my-cluster-secrets ++ getSecretData my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.BQq3l03hYE +++ mktemp ++ local LAST_ERR=/tmp/tmp.Iih0TGWw0B ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.BQq3l03hYE ++ cat /tmp/tmp.Iih0TGWw0B ++ rm /tmp/tmp.BQq3l03hYE /tmp/tmp.Iih0TGWw0B ++ return 0 + local root_pass=root_password + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.4 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.w2uYHGVOvi +++ mktemp ++ local LAST_ERR=/tmp/tmp.n5bQ8hGyzw ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.w2uYHGVOvi ++ cat /tmp/tmp.n5bQ8hGyzw ++ rm /tmp/tmp.w2uYHGVOvi /tmp/tmp.n5bQ8hGyzw ++ return 0 + client_pod=pxc-client-56fd5498cd-hfhjm + wait_pod pxc-client-56fd5498cd-hfhjm + local pod=pxc-client-56fd5498cd-hfhjm + local max_retry=480 + local ns= ++ echo pxc-client-56fd5498cd-hfhjm ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-56fd5498cd-hfhjm condition met waiting for pod/pxc-client-56fd5498cd-hfhjm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.VGEv7fT7gG +++ mktemp ++ local LAST_ERR=/tmp/tmp.hx7RcG5GhD ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.VGEv7fT7gG ++ cat 
/tmp/tmp.hx7RcG5GhD ++ rm /tmp/tmp.VGEv7fT7gG /tmp/tmp.hx7RcG5GhD ++ return 0 + client_pod=pxc-client-56fd5498cd-hfhjm + wait_pod pxc-client-56fd5498cd-hfhjm + local pod=pxc-client-56fd5498cd-hfhjm + local max_retry=480 + local ns= ++ echo pxc-client-56fd5498cd-hfhjm ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-56fd5498cd-hfhjm condition met waiting for pod/pxc-client-56fd5498cd-hfhjm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.4 =~ 8\.4 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/select-1-84.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.4 =~ 8\.0 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.4 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.u2K9oZcz2t +++ mktemp ++ local LAST_ERR=/tmp/tmp.UPva2HKtQI ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.u2K9oZcz2t ++ cat /tmp/tmp.UPva2HKtQI ++ rm /tmp/tmp.u2K9oZcz2t /tmp/tmp.UPva2HKtQI ++ return 0 + client_pod=pxc-client-56fd5498cd-hfhjm + wait_pod pxc-client-56fd5498cd-hfhjm + local pod=pxc-client-56fd5498cd-hfhjm + local max_retry=480 + local ns= ++ echo pxc-client-56fd5498cd-hfhjm ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-56fd5498cd-hfhjm condition met waiting for pod/pxc-client-56fd5498cd-hfhjm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.33j6bVDI1N/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.33j6bVDI1N/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/select-1.sql /tmp/tmp.33j6bVDI1N/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.4 =~ 8\.4 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/select-1-84.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.4 =~ 8\.0 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.4 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Rqy5hcJXhE +++ mktemp ++ local LAST_ERR=/tmp/tmp.sBSzLAgDx3 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Rqy5hcJXhE ++ cat /tmp/tmp.sBSzLAgDx3 ++ rm /tmp/tmp.Rqy5hcJXhE /tmp/tmp.sBSzLAgDx3 ++ return 0 + client_pod=pxc-client-56fd5498cd-hfhjm + wait_pod pxc-client-56fd5498cd-hfhjm + local pod=pxc-client-56fd5498cd-hfhjm + local max_retry=480 + local ns= ++ echo pxc-client-56fd5498cd-hfhjm ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-56fd5498cd-hfhjm condition met waiting for pod/pxc-client-56fd5498cd-hfhjm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.33j6bVDI1N/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.33j6bVDI1N/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/select-1.sql /tmp/tmp.33j6bVDI1N/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.4 =~ 8\.4 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/select-1-84.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.4 =~ 8\.0 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.4 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0IHLuHPyFd +++ mktemp ++ local LAST_ERR=/tmp/tmp.6KimCYuZw9 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.0IHLuHPyFd ++ cat /tmp/tmp.6KimCYuZw9 ++ rm /tmp/tmp.0IHLuHPyFd /tmp/tmp.6KimCYuZw9 ++ return 0 + client_pod=pxc-client-56fd5498cd-hfhjm + wait_pod pxc-client-56fd5498cd-hfhjm + local pod=pxc-client-56fd5498cd-hfhjm + local max_retry=480 + local ns= ++ echo pxc-client-56fd5498cd-hfhjm ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-56fd5498cd-hfhjm condition met waiting for pod/pxc-client-56fd5498cd-hfhjm to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! -s /tmp/tmp.33j6bVDI1N/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.33j6bVDI1N/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/select-1.sql /tmp/tmp.33j6bVDI1N/select-1.sql ++ is_keyring_plugin_in_use monitoring ++ local cluster=monitoring ++ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.4 =~ 8\.4 ]] ++ kubectl exec -it monitoring-pxc-0 -c pxc -- ls /var/lib/mysql/mysqld.my ++ return 1 + '[' '' ']' + wait_for_generation sts/monitoring-pxc 1 + local resource=sts/monitoring-pxc + local target_generation=1 + echo 'Waiting for sts/monitoring-pxc to reach generation 1...' Waiting for sts/monitoring-pxc to reach generation 1... + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=1 + '[' 1 -eq 1 ']' + echo 'Resource sts/monitoring-pxc has reached generation 1.' Resource sts/monitoring-pxc has reached generation 1. + break + wait_for_generation sts/monitoring-haproxy 1 + local resource=sts/monitoring-haproxy + local target_generation=1 + echo 'Waiting for sts/monitoring-haproxy to reach generation 1...' 
Waiting for sts/monitoring-haproxy to reach generation 1... + true ++ kubectl get sts/monitoring-haproxy -o 'jsonpath={.metadata.generation}' + current_generation=1 + '[' 1 -eq 1 ']' + echo 'Resource sts/monitoring-haproxy has reached generation 1.' Resource sts/monitoring-haproxy has reached generation 1. + break + sleep 10 + kubectl wait pod -l app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --for=condition=ready --timeout=600s pod/monitoring-haproxy-0 condition met pod/monitoring-haproxy-1 condition met pod/monitoring-pxc-0 condition met pod/monitoring-pxc-1 condition met pod/monitoring-pxc-2 condition met + wait_cluster_consistency monitoring 3 2 + local cluster_name=monitoring + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/monitoring to be ready' waiting for pxc/monitoring to be ready++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.c0ZDZTCQrw +++ mktemp ++ local LAST_ERR=/tmp/tmp.JTkgg8eW9s ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.c0ZDZTCQrw ++ cat /tmp/tmp.JTkgg8eW9s ++ rm /tmp/tmp.c0ZDZTCQrw /tmp/tmp.JTkgg8eW9s ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ueLyBaKBp8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.fjIIHjar2G ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ueLyBaKBp8 ++ cat /tmp/tmp.fjIIHjar2G ++ rm /tmp/tmp.ueLyBaKBp8 /tmp/tmp.fjIIHjar2G ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine monitoring +++ local cluster_name=monitoring ++++ get_proxy monitoring ++++ local target_cluster=monitoring +++++ kubectl_bin get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.84yOF9kBMZ ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.JtfwY3Fzmf +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.84yOF9kBMZ +++++ cat /tmp/tmp.JtfwY3Fzmf +++++ rm /tmp/tmp.84yOF9kBMZ /tmp/tmp.JtfwY3Fzmf +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo monitoring-haproxy ++++ return +++ local cluster_proxy=monitoring-haproxy +++ echo haproxy ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.62MHEFz2IX +++ mktemp ++ local LAST_ERR=/tmp/tmp.QQgeFnGp9v ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.62MHEFz2IX ++ cat /tmp/tmp.QQgeFnGp9v ++ rm /tmp/tmp.62MHEFz2IX /tmp/tmp.QQgeFnGp9v ++ return 0 + [[ 2 == \2 ]] + echo + compare_kubectl statefulset/monitoring-pxc -no-prefix + local resource=statefulset/monitoring-pxc + local postfix=-no-prefix + local 
expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix.yml + local new_result=/tmp/tmp.33j6bVDI1N/statefulset_monitoring-pxc.yml + desc 'compare statefulset/monitoring-pxc--no-prefix' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-pxc--no-prefix ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.4 =~ 8\.0 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.4 =~ 8\.4 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix-84.yml ']' + version_gt 1.33 ++ echo '1.32 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 ++ echo '1.32 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix-k129.yml ']' + version_gt 1.27 ++ echo '1.32 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix-k127-oc.yml ']' + version_gt 1.29 ++ echo '1.32 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-pxc ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") | del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. 
== "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-pmm3-14440", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.igK89Ro3if ++ mktemp + local LAST_ERR=/tmp/tmp.LZm5Ipp4fO + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.igK89Ro3if + cat /tmp/tmp.LZm5Ipp4fO + rm /tmp/tmp.igK89Ro3if /tmp/tmp.LZm5Ipp4fO + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix-k127.yml /tmp/tmp.33j6bVDI1N/statefulset_monitoring-pxc.yml + log 'compare_kubectl: statefulset/monitoring-pxc OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2026-03-11T09:31:40+0000]' compare_kubectl: statefulset/monitoring-pxc OK [2026-03-11T09:31:40+0000] compare_kubectl: statefulset/monitoring-pxc OK + compare_kubectl statefulset/monitoring-haproxy -no-prefix + local resource=statefulset/monitoring-haproxy + local postfix=-no-prefix + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix.yml + local new_result=/tmp/tmp.33j6bVDI1N/statefulset_monitoring-haproxy.yml + desc 'compare statefulset/monitoring-haproxy--no-prefix' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-haproxy--no-prefix ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.4 =~ 8\.0 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.4 =~ 8\.4 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix-84.yml ']' + version_gt 1.33 ++ echo '1.32 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 ++ echo '1.32 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix-k129.yml ']' + version_gt 1.27 ++ echo '1.32 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix-k127-oc.yml ']' + version_gt 1.29 ++ echo '1.32 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-haproxy ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") | del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. 
| select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-pmm3-14440", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.Z2F9UqfcCZ ++ mktemp + local LAST_ERR=/tmp/tmp.XMEY02v0IY + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Z2F9UqfcCZ + cat /tmp/tmp.XMEY02v0IY + rm /tmp/tmp.Z2F9UqfcCZ /tmp/tmp.XMEY02v0IY + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix-k127.yml /tmp/tmp.33j6bVDI1N/statefulset_monitoring-haproxy.yml + log 'compare_kubectl: statefulset/monitoring-haproxy OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2026-03-11T09:31:42+0000]' compare_kubectl: statefulset/monitoring-haproxy OK [2026-03-11T09:31:42+0000] compare_kubectl: statefulset/monitoring-haproxy OK + desc 'apply my-env-var-secrets to add PMM_PREFIX' + set +o xtrace ----------------------------------------------------------------------------------- apply my-env-var-secrets to add PMM_PREFIX ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/envsecrets.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.F48jAwHUsZ ++ mktemp + local LAST_ERR=/tmp/tmp.5Gfnr6niPl + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/envsecrets.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.F48jAwHUsZ secret/my-env-var-secrets created + cat /tmp/tmp.5Gfnr6niPl + rm /tmp/tmp.F48jAwHUsZ /tmp/tmp.5Gfnr6niPl + return 0 + wait_for_generation sts/monitoring-pxc 2 + local resource=sts/monitoring-pxc + local target_generation=2 + echo 'Waiting for sts/monitoring-pxc to reach generation 2...' Waiting for sts/monitoring-pxc to reach generation 2... + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=1 + '[' 1 -eq 2 ']' + echo 'Resource sts/monitoring-pxc is at generation 1. Waiting...' Resource sts/monitoring-pxc is at generation 1. Waiting... 
+ sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 2 ']' + echo 'Resource sts/monitoring-pxc has reached generation 2.' Resource sts/monitoring-pxc has reached generation 2. + break + wait_for_generation sts/monitoring-haproxy 2 + local resource=sts/monitoring-haproxy + local target_generation=2 + echo 'Waiting for sts/monitoring-haproxy to reach generation 2...' Waiting for sts/monitoring-haproxy to reach generation 2... + true ++ kubectl get sts/monitoring-haproxy -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 2 ']' + echo 'Resource sts/monitoring-haproxy has reached generation 2.' Resource sts/monitoring-haproxy has reached generation 2. + break + desc 'create new PMM token and add it to the secret' + set +o xtrace ----------------------------------------------------------------------------------- create new PMM token and add it to the secret ----------------------------------------------------------------------------------- ++ get_pmm_server_token operator_new ++ local key_name=operator_new ++ [[ -z operator_new ]] ++ local ADMIN_PASSWORD +++ kubectl get secret pmm-secret -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' +++ base64 --decode ++ ADMIN_PASSWORD='r]vc,*8"MaA[R#j;' ++ [[ -z r]vc,*8"MaA[R#j; ]] ++ local create_response create_status_code create_json_response ++ local retry=0 ++ [[ '' == 201 ]] ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.i2UaAOtWnL ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.pCSKtRUyrH +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.i2UaAOtWnL +++++ cat /tmp/tmp.pCSKtRUyrH +++++ rm /tmp/tmp.i2UaAOtWnL /tmp/tmp.pCSKtRUyrH +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.zDzY0wK2YS +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.GiOucM2hHH ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.zDzY0wK2YS ++++ cat /tmp/tmp.GiOucM2hHH ++++ rm /tmp/tmp.zDzY0wK2YS /tmp/tmp.GiOucM2hHH ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.JBIDZjVlW8 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.XjIzW44fGJ ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.JBIDZjVlW8 ++++ cat /tmp/tmp.XjIzW44fGJ ++++ rm /tmp/tmp.JBIDZjVlW8 /tmp/tmp.XjIzW44fGJ ++++ return 0 +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -d '{"name":"operator_new", 
"role":"Admin", "isDisabled":false}' --user 'admin:r]vc,*8"MaA[R#j;' https://34.122.19.231/graph/api/serviceaccounts -w '\n%{http_code}' ++ create_response='{"id":3,"uid":"affokxeoqi48we","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ echo '{"id":3,"uid":"affokxeoqi48we","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ tail -n1 ++ create_status_code=201 +++ echo '{"id":3,"uid":"affokxeoqi48we","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ sed '$ d' ++ create_json_response='{"id":3,"uid":"affokxeoqi48we","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' ++ sleep 5 ++ let retry+=1 ++ '[' 1 -ge 24 ']' ++ [[ 201 == 201 ]] ++ local service_account_id +++ echo '{"id":3,"uid":"affokxeoqi48we","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' +++ jq -r .id ++ service_account_id=3 ++ [[ -z 3 ]] ++ [[ 3 == \n\u\l\l ]] ++ local token_response token_status_code token_json_response ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.02oerjkZGr ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.xPJSUR0ckm +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.02oerjkZGr +++++ cat /tmp/tmp.xPJSUR0ckm +++++ rm /tmp/tmp.02oerjkZGr /tmp/tmp.xPJSUR0ckm +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.yAetYlvtQi +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.E5bTRWZO0i ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.yAetYlvtQi ++++ cat /tmp/tmp.E5bTRWZO0i ++++ rm /tmp/tmp.yAetYlvtQi /tmp/tmp.E5bTRWZO0i ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.SsLfbm2RwS +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.7jEakk9Tpy ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.SsLfbm2RwS ++++ cat /tmp/tmp.7jEakk9Tpy ++++ rm /tmp/tmp.SsLfbm2RwS /tmp/tmp.7jEakk9Tpy ++++ return 0 +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -d '{"name":"operator_new"}' --user 'admin:r]vc,*8"MaA[R#j;' https://34.122.19.231/graph/api/serviceaccounts/3/tokens -w '\n%{http_code}' ++ 
token_response='{"id":2,"name":"operator_new","key":"glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9"} 200' +++ echo '{"id":2,"name":"operator_new","key":"glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9"} 200' +++ tail -n1 ++ token_status_code=200 +++ echo '{"id":2,"name":"operator_new","key":"glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9"} 200' +++ sed '$ d' ++ token_json_response='{"id":2,"name":"operator_new","key":"glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9"}' ++ [[ 200 -ne 200 ]] ++ echo '{"id":2,"name":"operator_new","key":"glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9"}' ++ jq -r .key + NEW_TOKEN=glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9 + kubectl_bin patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmservertoken": "glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9"}}' ++ mktemp + local LAST_OUT=/tmp/tmp.kgydcCuvTd ++ mktemp + local LAST_ERR=/tmp/tmp.4FzkhscXzp + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmservertoken": "glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9"}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.kgydcCuvTd secret/my-cluster-secrets patched + cat /tmp/tmp.4FzkhscXzp + rm /tmp/tmp.kgydcCuvTd /tmp/tmp.4FzkhscXzp + return 0 + desc 'delete old PMM token' + set +o xtrace ----------------------------------------------------------------------------------- delete old PMM token ----------------------------------------------------------------------------------- + delete_pmm_server_token operator + local key_name=operator + [[ -z operator ]] + local ADMIN_PASSWORD ++ kubectl get secret pmm-secret -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' ++ base64 --decode + ADMIN_PASSWORD='r]vc,*8"MaA[R#j;' + [[ -z r]vc,*8"MaA[R#j; ]] + local 'user_credentials=admin:r]vc,*8"MaA[R#j;' + local service_accounts_response service_accounts_status +++ get_service_ip monitoring-service +++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ETXM1xM0sj +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.4mXVfU9mE3 ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.ETXM1xM0sj ++++ cat /tmp/tmp.4mXVfU9mE3 ++++ rm /tmp/tmp.ETXM1xM0sj /tmp/tmp.4mXVfU9mE3 ++++ return 0 +++ '[' LoadBalancer = ClusterIP ']' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' +++ grep -E -q 'hostname|ip' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.bK9zyjWXxZ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1U5ITiqEXl +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.bK9zyjWXxZ +++ cat /tmp/tmp.1U5ITiqEXl +++ rm /tmp/tmp.bK9zyjWXxZ /tmp/tmp.1U5ITiqEXl +++ return 0 +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2XrByMaCAe ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MP3t6FQbvz +++ local exit_status=0 ++++ seq 0 2 
+++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.2XrByMaCAe +++ cat /tmp/tmp.MP3t6FQbvz +++ rm /tmp/tmp.2XrByMaCAe /tmp/tmp.MP3t6FQbvz +++ return 0 ++ curl --insecure -s -X GET --user 'admin:r]vc,*8"MaA[R#j;' https://34.122.19.231/graph/api/serviceaccounts/search -w '\n%{http_code}' + service_accounts_response='{"totalCount":2,"serviceAccounts":[{"id":2,"uid":"effok7f256igwe","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"uid":"affokxeoqi48we","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000} 200' ++ echo '{"totalCount":2,"serviceAccounts":[{"id":2,"uid":"effok7f256igwe","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"uid":"affokxeoqi48we","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000} 200' ++ tail -n1 + service_accounts_status=200 ++ echo '{"totalCount":2,"serviceAccounts":[{"id":2,"uid":"effok7f256igwe","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"uid":"affokxeoqi48we","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000} 200' ++ sed '$ d' + service_accounts_json='{"totalCount":2,"serviceAccounts":[{"id":2,"uid":"effok7f256igwe","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"uid":"affokxeoqi48we","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000}' + [[ 200 -ne 200 ]] + local service_account_id ++ echo '{"totalCount":2,"serviceAccounts":[{"id":2,"uid":"effok7f256igwe","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"uid":"affokxeoqi48we","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000}' ++ jq -r '.serviceAccounts[] | select(.name == "operator").id' + service_account_id=2 + [[ -z 2 ]] + [[ 2 == \n\u\l\l ]] + local tokens_response tokens_status tokens_json +++ get_service_ip monitoring-service +++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.HNJN4uhyGL +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.F3sdBqQHPV ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 
'jsonpath={.spec.type}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.HNJN4uhyGL ++++ cat /tmp/tmp.F3sdBqQHPV ++++ rm /tmp/tmp.HNJN4uhyGL /tmp/tmp.F3sdBqQHPV ++++ return 0 +++ '[' LoadBalancer = ClusterIP ']' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' +++ grep -E -q 'hostname|ip' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.s78FlnByt9 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.NOwF068p4z +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.s78FlnByt9 +++ cat /tmp/tmp.NOwF068p4z +++ rm /tmp/tmp.s78FlnByt9 /tmp/tmp.NOwF068p4z +++ return 0 +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.HXHTr6Yk6k ++++ mktemp +++ local LAST_ERR=/tmp/tmp.DflTqd5ezq +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.HXHTr6Yk6k +++ cat /tmp/tmp.DflTqd5ezq +++ rm /tmp/tmp.HXHTr6Yk6k /tmp/tmp.DflTqd5ezq +++ return 0 ++ curl --insecure -s -X GET --user 'admin:r]vc,*8"MaA[R#j;' https://34.122.19.231/graph/api/serviceaccounts/2/tokens -w '\n%{http_code}' + tokens_response='[{"id":1,"name":"operator","created":"2026-03-11T09:24:00Z","lastUsedAt":"2026-03-11T09:29:17Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}] 200' ++ echo '[{"id":1,"name":"operator","created":"2026-03-11T09:24:00Z","lastUsedAt":"2026-03-11T09:29:17Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}] 200' ++ tail -n1 + tokens_status=200 ++ echo '[{"id":1,"name":"operator","created":"2026-03-11T09:24:00Z","lastUsedAt":"2026-03-11T09:29:17Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}] 200' ++ sed '$ d' + tokens_json='[{"id":1,"name":"operator","created":"2026-03-11T09:24:00Z","lastUsedAt":"2026-03-11T09:29:17Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}]' + [[ 200 -ne 200 ]] + local token_id ++ echo '[{"id":1,"name":"operator","created":"2026-03-11T09:24:00Z","lastUsedAt":"2026-03-11T09:29:17Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}]' ++ jq -r '.[] | select(.name == "operator").id' + token_id=1 + [[ -z 1 ]] + [[ 1 == \n\u\l\l ]] + local delete_response delete_status +++ get_service_ip monitoring-service +++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.xDnV9O14Bd +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.DgM3ptWLRd ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.xDnV9O14Bd ++++ cat /tmp/tmp.DgM3ptWLRd ++++ rm /tmp/tmp.xDnV9O14Bd /tmp/tmp.DgM3ptWLRd ++++ return 0 +++ '[' LoadBalancer = ClusterIP ']' +++ kubectl_bin get 
service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' +++ grep -E -q 'hostname|ip' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.F6Nzh6fQgy ++++ mktemp +++ local LAST_ERR=/tmp/tmp.D0SGhiPKBj +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.F6Nzh6fQgy +++ cat /tmp/tmp.D0SGhiPKBj +++ rm /tmp/tmp.F6Nzh6fQgy /tmp/tmp.D0SGhiPKBj +++ return 0 +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.QcBbTV0BhH ++++ mktemp +++ local LAST_ERR=/tmp/tmp.7BDtyZHgTX +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.QcBbTV0BhH +++ cat /tmp/tmp.7BDtyZHgTX +++ rm /tmp/tmp.QcBbTV0BhH /tmp/tmp.7BDtyZHgTX +++ return 0 ++ curl --insecure -s -X DELETE --user 'admin:r]vc,*8"MaA[R#j;' https://34.122.19.231/graph/api/serviceaccounts/2/tokens/1 -w '\n%{http_code}' + delete_response='{"message":"Service account token deleted"} 200' ++ echo '{"message":"Service account token deleted"} 200' ++ tail -n1 + delete_status=200 + [[ 200 -ne 200 ]] + wait_for_generation sts/monitoring-pxc 3 + local resource=sts/monitoring-pxc + local target_generation=3 + echo 'Waiting for sts/monitoring-pxc to reach generation 3...' Waiting for sts/monitoring-pxc to reach generation 3... + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... 
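# sketch: the PMM token rotation performed above, condensed into one helper.
# PMM3 serves the Grafana service-account API under /graph; the endpoints,
# JSON payloads, and the pmmservertoken secret key are taken from this trace,
# while the helper name rotate_pmm_token and its argument order are
# hypothetical:
rotate_pmm_token() {
    local endpoint=$1 admin_password=$2 key_name=$3
    local sa_id new_token
    # create a service account, then mint an API token for it
    sa_id=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
        -d "{\"name\":\"${key_name}\",\"role\":\"Admin\",\"isDisabled\":false}" \
        --user "admin:${admin_password}" \
        "https://${endpoint}/graph/api/serviceaccounts" | jq -r .id)
    new_token=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
        -d "{\"name\":\"${key_name}\"}" \
        --user "admin:${admin_password}" \
        "https://${endpoint}/graph/api/serviceaccounts/${sa_id}/tokens" | jq -r .key)
    # handing the new token to the operator re-renders the pod template, which
    # bumps the statefulset generation that the surrounding polling waits on;
    # the old token was revoked above with
    # DELETE /graph/api/serviceaccounts/<sa_id>/tokens/<token_id>
    kubectl patch secret my-cluster-secrets --type merge \
        --patch "{\"stringData\": {\"pmmservertoken\": \"${new_token}\"}}"
}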
+ sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 3 ']' + echo 'Resource sts/monitoring-pxc has reached generation 3.' 
Resource sts/monitoring-pxc has reached generation 3. + break + wait_for_generation sts/monitoring-haproxy 3 + local resource=sts/monitoring-haproxy + local target_generation=3 + echo 'Waiting for sts/monitoring-haproxy to reach generation 3...' Waiting for sts/monitoring-haproxy to reach generation 3... + true ++ kubectl get sts/monitoring-haproxy -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 3 ']' + echo 'Resource sts/monitoring-haproxy has reached generation 3.' Resource sts/monitoring-haproxy has reached generation 3. + break + sleep 10 + kubectl wait pod -l app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --for=condition=ready --timeout=600s pod/monitoring-haproxy-0 condition met pod/monitoring-haproxy-1 condition met pod/monitoring-pxc-0 condition met pod/monitoring-pxc-1 condition met pod/monitoring-pxc-2 condition met + desc 'check if pmm-client container enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-pxc + local resource=statefulset/monitoring-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc.yml + local new_result=/tmp/tmp.33j6bVDI1N/statefulset_monitoring-pxc.yml + desc 'compare statefulset/monitoring-pxc-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-pxc- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.4 =~ 8\.0 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.4 =~ 8\.4 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-84.yml ']' + version_gt 1.33 ++ echo '1.32 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 ++ echo '1.32 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-k129.yml ']' + version_gt 1.27 ++ bc -l ++ echo '1.32 >= 1.27' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-k127-oc.yml ']' + version_gt 1.29 ++ echo '1.32 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-pxc ++ mktemp + local LAST_OUT=/tmp/tmp.F6DxEHf9AB ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") | del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. 
| select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-pmm3-14440", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_ERR=/tmp/tmp.B3TMit9asB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.F6DxEHf9AB + cat /tmp/tmp.B3TMit9asB + rm /tmp/tmp.F6DxEHf9AB /tmp/tmp.B3TMit9asB + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-k127.yml /tmp/tmp.33j6bVDI1N/statefulset_monitoring-pxc.yml + log 'compare_kubectl: statefulset/monitoring-pxc OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2026-03-11T09:35:29+0000]' compare_kubectl: statefulset/monitoring-pxc OK [2026-03-11T09:35:29+0000] compare_kubectl: statefulset/monitoring-pxc OK + compare_kubectl statefulset/monitoring-haproxy + local resource=statefulset/monitoring-haproxy + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy.yml + local new_result=/tmp/tmp.33j6bVDI1N/statefulset_monitoring-haproxy.yml + desc 'compare statefulset/monitoring-haproxy-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-haproxy- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.4 =~ 8\.0 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.4 =~ 8\.4 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-84.yml ']' + version_gt 1.33 ++ bc -l ++ echo '1.32 >= 1.33' + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 ++ bc -l ++ echo '1.32 >= 1.29' + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-k129.yml ']' + version_gt 1.27 ++ echo '1.32 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-k127.yml ']' + 
expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-k127-oc.yml ']' + version_gt 1.29 ++ echo '1.32 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-haproxy + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") | del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. 
| select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-pmm3-14440", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.IJw9bQaSVP ++ mktemp + local LAST_ERR=/tmp/tmp.uykiWyuSCB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.IJw9bQaSVP + cat /tmp/tmp.uykiWyuSCB + rm /tmp/tmp.IJw9bQaSVP /tmp/tmp.uykiWyuSCB + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-k127.yml /tmp/tmp.33j6bVDI1N/statefulset_monitoring-haproxy.yml + log 'compare_kubectl: statefulset/monitoring-haproxy OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2026-03-11T09:35:31+0000]' compare_kubectl: statefulset/monitoring-haproxy OK [2026-03-11T09:35:31+0000] compare_kubectl: statefulset/monitoring-haproxy OK + desc 'check mysql metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mysql metrics ----------------------------------------------------------------------------------- + sleep 60 + get_metric_values_pmm3 node_boot_time_seconds pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0 glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9 + local metric=node_boot_time_seconds + local instance=pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0 + local token=glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221731 ++ /usr/bin/date -u +%s + local end=1773221791 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.MnyQUn6Ivz ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GdXIo5w0WM +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ 
break +++ cat /tmp/tmp.MnyQUn6Ivz +++ cat /tmp/tmp.GdXIo5w0WM +++ rm /tmp/tmp.MnyQUn6Ivz /tmp/tmp.GdXIo5w0WM +++ return 0 ++ local endpoint= ++ '[' -z '' ']' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ezRGSpzkXQ ++++ mktemp +++ local LAST_ERR=/tmp/tmp.2cObkTQRpl +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.ezRGSpzkXQ +++ cat /tmp/tmp.2cObkTQRpl +++ rm /tmp/tmp.ezRGSpzkXQ /tmp/tmp.2cObkTQRpl +++ return 0 ++ endpoint=34.122.19.231 ++ '[' -n 34.122.19.231 ']' ++ '[' 34.122.19.231 '!=' null ']' ++ echo 34.122.19.231 ++ sed -e 's/^"//; s/"$//;' ++ head -n 1 ++ return + local endpoint=34.122.19.231 + '[' -z node_boot_time_seconds ']' + '[' -z glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9 ']' + local wait_count=30 + local retry=0 ++ jq '.data.result[0].values[][1]' ++ grep '^"[0-9]' ++ curl -s -k -H 'Authorization: Bearer glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9' 'https://34.122.19.231/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0%22%7D%29&start=1773221731&end=1773221791&step=60' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221737 ++ /usr/bin/date -u +%s + local end=1773221797 + let retry+=1 + [[ 1 -ge 30 ]] ++ jq '.data.result[0].values[][1]' ++ grep '^"[0-9]' ++ curl -s -k -H 'Authorization: Bearer glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9' 'https://34.122.19.231/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0%22%7D%29&start=1773221737&end=1773221797&step=60' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221739 ++ /usr/bin/date -u +%s + local end=1773221799 + let retry+=1 + [[ 2 -ge 30 ]] ++ jq '.data.result[0].values[][1]' ++ grep '^"[0-9]' ++ curl -s -k -H 'Authorization: Bearer glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9' 'https://34.122.19.231/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0%22%7D%29&start=1773221739&end=1773221799&step=60' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221742 ++ /usr/bin/date -u +%s + local end=1773221802 + let retry+=1 + [[ 3 -ge 30 ]] ++ jq '.data.result[0].values[][1]' ++ curl -s -k -H 'Authorization: Bearer glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9' 
'https://34.122.19.231/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0%22%7D%29&start=1773221742&end=1773221802&step=60' ++ grep '^"[0-9]' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221744 ++ /usr/bin/date -u +%s + local end=1773221804 + let retry+=1 + [[ 4 -ge 30 ]] ++ grep '^"[0-9]' ++ jq '.data.result[0].values[][1]' ++ curl -s -k -H 'Authorization: Bearer glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9' 'https://34.122.19.231/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0%22%7D%29&start=1773221744&end=1773221804&step=60' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221747 ++ /usr/bin/date -u +%s + local end=1773221807 + let retry+=1 + [[ 5 -ge 30 ]] ++ curl -s -k -H 'Authorization: Bearer glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9' 'https://34.122.19.231/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0%22%7D%29&start=1773221747&end=1773221807&step=60' ++ jq '.data.result[0].values[][1]' ++ grep '^"[0-9]' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221749 ++ /usr/bin/date -u +%s + local end=1773221809 + let retry+=1 + [[ 6 -ge 30 ]] ++ jq '.data.result[0].values[][1]' ++ grep '^"[0-9]' ++ curl -s -k -H 'Authorization: Bearer glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9' 'https://34.122.19.231/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0%22%7D%29&start=1773221749&end=1773221809&step=60' + [[ -n "1773214507" ]] + get_metric_values_pmm3 mysql_global_status_uptime pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0 glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9 + local metric=mysql_global_status_uptime + local instance=pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0 + local token=glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221750 ++ /usr/bin/date -u +%s + local end=1773221810 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.hTQIdmyVWV ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wKT2sGH3mV +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.hTQIdmyVWV +++ cat /tmp/tmp.wKT2sGH3mV +++ rm /tmp/tmp.hTQIdmyVWV /tmp/tmp.wKT2sGH3mV +++ return 0 ++ local endpoint= ++ '[' -z '' ']' 
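-----------------------------------------------------------------------------------
editor's note: the metric retry loop
-----------------------------------------------------------------------------------
The repeated `jq: error (at :0): Cannot iterate over null (null)` lines above are expected, not failures: they simply mean PMM has not ingested the series yet. The harness polls Grafana's datasource-proxy query_range endpoint with a sliding one-minute window, sleeping 2 seconds between attempts, up to 30 times. A condensed sketch of that loop, with METRIC, INSTANCE, TOKEN and ENDPOINT standing in for the concrete values in the trace (illustrative names, not the harness's helper):

    retry=0
    while true; do
      start=$(date -u +%s -d '-1 minute'); end=$(date -u +%s)
      # -G with --data-urlencode builds the same percent-encoded
      # query_range URL seen in the trace; jq pulls the first sample.
      value=$(curl -sk -G -H "Authorization: Bearer $TOKEN" \
          "https://$ENDPOINT/graph/api/datasources/proxy/1/api/v1/query_range" \
          --data-urlencode "query=min(${METRIC}{node_name=~\"${INSTANCE}\"})" \
          --data "start=$start" --data "end=$end" --data "step=60" \
        | jq -r '.data.result[0].values[][1]' 2>/dev/null | head -n1)
      [[ -n $value ]] && break
      (( ++retry >= 30 )) && { echo "no data for $METRIC" >&2; exit 1; }
      sleep 2
    done

Once a numeric sample appears (e.g. `[[ -n "1773214507" ]]` for node_boot_time_seconds above), the check passes and the next metric is queried.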
+++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.eYa2slIauX ++++ mktemp +++ local LAST_ERR=/tmp/tmp.IGbH3l4YkF +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.eYa2slIauX +++ cat /tmp/tmp.IGbH3l4YkF +++ rm /tmp/tmp.eYa2slIauX /tmp/tmp.IGbH3l4YkF +++ return 0 ++ endpoint=34.122.19.231 ++ '[' -n 34.122.19.231 ']' ++ '[' 34.122.19.231 '!=' null ']' ++ echo 34.122.19.231 ++ sed -e 's/^"//; s/"$//;' ++ head -n 1 ++ return + local endpoint=34.122.19.231 + '[' -z mysql_global_status_uptime ']' + '[' -z glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9 ']' + local wait_count=30 + local retry=0 ++ curl -s -k -H 'Authorization: Bearer glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9' 'https://34.122.19.231/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0%22%7d%20or%20mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0%22%7D%29&start=1773221750&end=1773221810&step=60' ++ jq '.data.result[0].values[][1]' ++ grep '^"[0-9]' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221755 ++ /usr/bin/date -u +%s + local end=1773221815 + let retry+=1 + [[ 1 -ge 30 ]] ++ curl -s -k -H 'Authorization: Bearer glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9' 'https://34.122.19.231/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0%22%7d%20or%20mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0%22%7D%29&start=1773221755&end=1773221815&step=60' ++ grep '^"[0-9]' ++ jq '.data.result[0].values[][1]' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221758 ++ /usr/bin/date -u +%s + local end=1773221818 + let retry+=1 + [[ 2 -ge 30 ]] ++ jq '.data.result[0].values[][1]' ++ curl -s -k -H 'Authorization: Bearer glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9' 'https://34.122.19.231/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0%22%7d%20or%20mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-pxc-0%22%7D%29&start=1773221758&end=1773221818&step=60' ++ grep '^"[0-9]' + [[ -n "5" ]] + desc 'check haproxy metrics' + set +o xtrace ----------------------------------------------------------------------------------- check haproxy metrics ----------------------------------------------------------------------------------- + get_metric_values_pmm3 haproxy_backend_status pxc-prefix-monitoring-pmm3-14440-monitoring-haproxy-0 glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9 + local metric=haproxy_backend_status + local instance=pxc-prefix-monitoring-pmm3-14440-monitoring-haproxy-0 + local token=glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221758 ++ /usr/bin/date -u +%s + local end=1773221818 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get 
service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.m2bGw7LKua ++++ mktemp +++ local LAST_ERR=/tmp/tmp.CbzBpQiNru +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.m2bGw7LKua +++ cat /tmp/tmp.CbzBpQiNru +++ rm /tmp/tmp.m2bGw7LKua /tmp/tmp.CbzBpQiNru +++ return 0 ++ local endpoint= ++ '[' -z '' ']' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5kUITeiGvP ++++ mktemp +++ local LAST_ERR=/tmp/tmp.teXlfuHy2m +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.5kUITeiGvP +++ cat /tmp/tmp.teXlfuHy2m +++ rm /tmp/tmp.5kUITeiGvP /tmp/tmp.teXlfuHy2m +++ return 0 ++ endpoint=34.122.19.231 ++ '[' -n 34.122.19.231 ']' ++ '[' 34.122.19.231 '!=' null ']' ++ echo 34.122.19.231 ++ head -n 1 ++ sed -e 's/^"//; s/"$//;' ++ return + local endpoint=34.122.19.231 + '[' -z haproxy_backend_status ']' + '[' -z glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9 ']' + local wait_count=30 + local retry=0 ++ curl -s -k -H 'Authorization: Bearer glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9' 'https://34.122.19.231/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28haproxy_backend_status%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-haproxy-0%22%7d%20or%20haproxy_backend_status%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-haproxy-0%22%7D%29&start=1773221758&end=1773221818&step=60' ++ jq '.data.result[0].values[][1]' ++ grep '^"[0-9]' + [[ -n "0" "0" ]] + get_metric_values_pmm3 haproxy_backend_active_servers pxc-prefix-monitoring-pmm3-14440-monitoring-haproxy-0 glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9 + local metric=haproxy_backend_active_servers + local instance=pxc-prefix-monitoring-pmm3-14440-monitoring-haproxy-0 + local token=glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221762 ++ /usr/bin/date -u +%s + local end=1773221822 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.9LMxEv61Z6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.GKc30xWOqg +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.9LMxEv61Z6 +++ cat /tmp/tmp.GKc30xWOqg +++ rm /tmp/tmp.9LMxEv61Z6 /tmp/tmp.GKc30xWOqg +++ return 0 ++ local endpoint= ++ '[' -z '' ']' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.uCvXtNY5w1 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.jBuN1rq9dv +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.uCvXtNY5w1 +++ cat /tmp/tmp.jBuN1rq9dv +++ rm 
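-----------------------------------------------------------------------------------
editor's note: resolving the PMM endpoint
-----------------------------------------------------------------------------------
Each metric check above re-resolves the monitoring-service LoadBalancer address: it first asks for `.status.loadBalancer.ingress[].hostname` (empty on GKE, which hands out IPs) and falls back to `.ip`, ending up with 34.122.19.231 every time. The helper amounts to something like this sketch:

    get_service_endpoint() {
      local service=$1 endpoint
      # Prefer a hostname (e.g. AWS ELBs), fall back to a raw IP (e.g. GKE).
      endpoint=$(kubectl get "service/$service" \
        -o 'jsonpath={.status.loadBalancer.ingress[].hostname}')
      if [[ -z $endpoint ]]; then
        endpoint=$(kubectl get "service/$service" \
          -o 'jsonpath={.status.loadBalancer.ingress[].ip}')
      fi
      echo "$endpoint" | head -n 1
    }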
/tmp/tmp.uCvXtNY5w1 /tmp/tmp.jBuN1rq9dv +++ return 0 ++ endpoint=34.122.19.231 ++ '[' -n 34.122.19.231 ']' ++ '[' 34.122.19.231 '!=' null ']' ++ head -n 1 ++ sed -e 's/^"//; s/"$//;' ++ echo 34.122.19.231 ++ return + local endpoint=34.122.19.231 + '[' -z haproxy_backend_active_servers ']' + '[' -z glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9 ']' + local wait_count=30 + local retry=0 ++ curl -s -k -H 'Authorization: Bearer glsa_2nFqYhmFE9Clq7RhQyRVD9rEY4uOmNhI_7413bda9' 'https://34.122.19.231/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28haproxy_backend_active_servers%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-haproxy-0%22%7d%20or%20haproxy_backend_active_servers%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-14440-monitoring-haproxy-0%22%7D%29&start=1773221762&end=1773221822&step=60' ++ grep '^"[0-9]' ++ jq '.data.result[0].values[][1]' + [[ -n "1" "0" ]] + desc 'switch from haproxy to proxysql' + set +o xtrace ----------------------------------------------------------------------------------- switch from haproxy to proxysql ----------------------------------------------------------------------------------- + kubectl_bin patch pxc monitoring --type=json -p '[ {"op": "replace", "path": "/spec/haproxy/enabled", "value": false}, {"op": "replace", "path": "/spec/proxysql/enabled", "value": true} ]' ++ mktemp + local LAST_OUT=/tmp/tmp.fXNI0Q87jC ++ mktemp + local LAST_ERR=/tmp/tmp.WCtFe2cTmp + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc monitoring --type=json -p '[ {"op": "replace", "path": "/spec/haproxy/enabled", "value": false}, {"op": "replace", "path": "/spec/proxysql/enabled", "value": true} ]' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.fXNI0Q87jC perconaxtradbcluster.pxc.percona.com/monitoring patched + cat /tmp/tmp.WCtFe2cTmp + rm /tmp/tmp.fXNI0Q87jC /tmp/tmp.WCtFe2cTmp + return 0 + wait_for_delete sts/monitoring-haproxy + local res=sts/monitoring-haproxy + echo -n 'waiting for sts/monitoring-haproxy to be deleted' waiting for sts/monitoring-haproxy to be deleted+ set +o xtrace ........................................................................................................................2026-03-11T09:21:27.229Z INFO setup Runs on {"platform": "kubernetes", "version": "v1.32.12-gke.1127000"} 2026-03-11T09:21:27.229Z INFO setup Manager starting up {"gitCommit": "7f4bfbf44130eef78e7b2b7137fa04bd4427267a", "gitBranch": "PR-2384-7f4bfbf4", "buildTime": "2026-03-11T07:27:21Z", "goVersion": "go1.25.8", "os": "linux", "arch": "amd64"} 2026-03-11T09:21:27.229Z INFO setup Feature gates {"PXCO_FEATURE_GATES": "", "enabled": ""} 2026-03-11T09:21:27.232Z INFO setup Registering Components. 2026-03-11T09:21:27.906Z INFO controller-runtime.webhook Registering webhook {"path": "/validate-percona-xtradbcluster"} 2026-03-11T09:21:27.906Z INFO setup Starting the Cmd. 
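-----------------------------------------------------------------------------------
editor's note: the haproxy-to-proxysql switch
-----------------------------------------------------------------------------------
The switch above is a single JSON Patch (`--type=json`) that flips `/spec/haproxy/enabled` off and `/spec/proxysql/enabled` on in one atomic request, after which the test blocks in `wait_for_delete` until the haproxy StatefulSet is garbage-collected; each printed dot is one poll. The wait amounts to a loop like this (a sketch; the real helper presumably also enforces a timeout):

    wait_for_delete() {
      local res=$1
      echo -n "waiting for $res to be deleted"
      # Poll until kubectl reports NotFound for the resource.
      while kubectl get "$res" >/dev/null 2>&1; do
        echo -n .
        sleep 1
      done
      echo
    }
    wait_for_delete sts/monitoring-haproxy

The operator log appended below (setup, leader election on lease pxc-operator/08db1feb.percona.com, controller and worker start, then per-reconcile object creation) is dumped into the trace while this wait runs, which is why its timestamps start back at 09:21.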
2026-03-11T09:21:27.907Z INFO controller-runtime.metrics Starting metrics server 2026-03-11T09:21:27.907Z INFO starting server {"name": "health probe", "addr": "[::]:8081"} 2026-03-11T09:21:27.907Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":8080", "secure": false} 2026-03-11T09:21:27.907Z INFO controller-runtime.webhook Starting webhook server 2026-03-11T09:21:27.907Z INFO controller-runtime.certwatcher Updated current TLS certificate {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key"} 2026-03-11T09:21:27.907Z INFO controller-runtime.webhook Serving webhook server {"host": "", "port": 9443} 2026-03-11T09:21:27.907Z INFO controller-runtime.certwatcher Starting certificate poll+watcher {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key", "interval": "10s"} 2026-03-11T09:21:28.008Z INFO Attempting to acquire leader lease... {"lock": "pxc-operator/08db1feb.percona.com"} 2026-03-11T09:21:28.036Z INFO Successfully acquired lease {"lock": "pxc-operator/08db1feb.percona.com"} 2026-03-11T09:21:28.036Z DEBUG events percona-xtradb-cluster-operator-944bd69c8-lqt9v_89841dec-110d-4b99-a156-3f8a220a2adc became leader {"type": "Normal", "object": {"kind":"Lease","namespace":"pxc-operator","name":"08db1feb.percona.com","uid":"1d44d33e-4689-4aed-b9b7-ae225187d33f","apiVersion":"coordination.k8s.io/v1","resourceVersion":"1773220888030431009"}, "reason": "LeaderElection"} 2026-03-11T09:21:28.036Z INFO Starting EventSource {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "source": "kind source: *v1.Secret"} 2026-03-11T09:21:28.036Z INFO Starting EventSource {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "source": "kind source: *v1.PerconaXtraDBCluster"} 2026-03-11T09:21:28.036Z INFO Starting EventSource {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "source": "kind source: *v1.PerconaXtraDBClusterRestore"} 2026-03-11T09:21:28.036Z INFO Starting EventSource {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "source": "kind source: *v1.PerconaXtraDBClusterBackup"} 2026-03-11T09:21:28.137Z INFO Starting Controller {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup"} 2026-03-11T09:21:28.137Z INFO Starting workers {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "worker count": 1} 2026-03-11T09:21:28.238Z INFO Starting Controller {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore"} 2026-03-11T09:21:28.238Z INFO Starting workers {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "worker count": 1} 2026-03-11T09:21:28.238Z INFO Starting Controller {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster"} 2026-03-11T09:21:28.238Z INFO Starting workers {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "worker count": 1} 2026-03-11T09:24:05.624Z INFO Set CR version {"controller": 
"pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "e7b31a9a-a111-4b5c-b1dd-390f94211d83", "version": "1.20.0"} 2026-03-11T09:24:07.907Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "e7b31a9a-a111-4b5c-b1dd-390f94211d83", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"} 2026-03-11T09:24:07.947Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "e7b31a9a-a111-4b5c-b1dd-390f94211d83", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"} 2026-03-11T09:24:07.996Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "e7b31a9a-a111-4b5c-b1dd-390f94211d83", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-03-11T09:24:08.048Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "e7b31a9a-a111-4b5c-b1dd-390f94211d83", "object": "monitoring-pxc-unready", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-03-11T09:24:08.095Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "e7b31a9a-a111-4b5c-b1dd-390f94211d83", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-03-11T09:24:08.195Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "e7b31a9a-a111-4b5c-b1dd-390f94211d83", "object": "monitoring-haproxy-replicas", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-03-11T09:24:09.086Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "78a371c3-273a-47f4-8ec9-0ea90783429d", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2026-03-11T09:24:09.117Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", 
"controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "78a371c3-273a-47f4-8ec9-0ea90783429d", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2026-03-11T09:25:30.124Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "424107b8-72bb-4615-ae66-8ec437ffa41f", "user": "operator"} 2026-03-11T09:25:30.161Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "424107b8-72bb-4615-ae66-8ec437ffa41f", "user": "monitor"} 2026-03-11T09:25:30.223Z INFO User monitor: granted privileges {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "424107b8-72bb-4615-ae66-8ec437ffa41f"} 2026-03-11T09:25:30.262Z INFO monitor user privileges granted {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "424107b8-72bb-4615-ae66-8ec437ffa41f"} 2026-03-11T09:25:30.304Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "424107b8-72bb-4615-ae66-8ec437ffa41f", "user": "xtrabackup"} 2026-03-11T09:25:30.355Z INFO User xtrabackup: granted privileges {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "424107b8-72bb-4615-ae66-8ec437ffa41f"} 2026-03-11T09:25:30.404Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "424107b8-72bb-4615-ae66-8ec437ffa41f", "user": "replication"} 2026-03-11T09:25:47.415Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "424107b8-72bb-4615-ae66-8ec437ffa41f", "err": "get primary pxc pod: failed to get proxy connection: dial tcp 34.118.237.159:3306: i/o timeout"} 2026-03-11T09:28:03.016Z 
INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "0a80e369-4b93-4cb9-b44c-13c64fc21739", "user": "root"} 2026-03-11T09:28:03.116Z INFO update PXC version (fetched from db) {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "0a80e369-4b93-4cb9-b44c-13c64fc21739", "new version": "8.4.7-7.1"} 2026-03-11T09:31:47.114Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "f7afd4a8-4524-4aec-b7d1-cdc9047b1a70", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true}   &v1.StatefulSet{    TypeMeta: {Kind: "StatefulSet", APIVersion: "apps/v1"},    ObjectMeta: v1.ObjectMeta{    ... // 2 identical fields    Namespace: "monitoring-pmm3-14440",    SelfLink: "", -  UID: "80339a3a-7837-48c5-aae0-4776b8bdda2f", +  UID: "", -  ResourceVersion: "1773221281680127006", +  ResourceVersion: "", -  Generation: 1, +  Generation: 0, -  CreationTimestamp: v1.Time{Time: s"2026-03-11 09:24:07 +0000 UTC"}, +  CreationTimestamp: v1.Time{},    DeletionTimestamp: nil,    DeletionGracePeriodSeconds: nil,    Labels: nil, -  Annotations: map[string]string{ -  "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vc3NsLWhhc2giOiI2MWM5N2JiNWQyNTY4Zjk5YjcxOGUxZDRhYTk5ZjhiNSIsInBlcmNvbmEu"..., -  }, +  Annotations: map[string]string{ +  "percona.com/last-config-hash": 
"eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFiYzcwNGYwMmU3NDg3"..., +  },    OwnerReferences: {{APIVersion: "pxc.percona.com/v1", Kind: "PerconaXtraDBCluster", Name: "monitoring", UID: "2ac7d1af-9cac-4e40-a153-80f1aef7bb35", ...}},    Finalizers: nil, -  ManagedFields: []v1.ManagedFieldsEntry{ -  { -  Manager: "percona-xtradb-cluster-operator", -  Operation: "Update", -  APIVersion: "apps/v1", -  Time: s"2026-03-11 09:24:07 +0000 UTC", -  FieldsType: "FieldsV1", -  FieldsV1: s`{"f:metadata":{"f:annotations":{".":{},"f:percona.com/last-confi`..., -  }, -  { -  Manager: "kube-controller-manager", -  Operation: "Update", -  APIVersion: "apps/v1", -  Time: s"2026-03-11 09:28:01 +0000 UTC", -  FieldsType: "FieldsV1", -  FieldsV1: s`{"f:status":{"f:availableReplicas":{},"f:collisionCount":{},"f:c`..., -  Subresource: "status", -  }, -  }, +  ManagedFields: nil,    },    Spec: v1.StatefulSetSpec{    Replicas: &3,    Selector: &{MatchLabels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}},    Template: v1.PodTemplateSpec{    ObjectMeta: v1.ObjectMeta{    ... // 9 identical fields    DeletionGracePeriodSeconds: nil,    Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...},    Annotations: map[string]string{    "kubectl.kubernetes.io/default-container": "pxc",    "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e", +  "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0",    "percona.com/ssl-hash": "61c97bb5d2568f99b718e1d4aa99f8b5",    "percona.com/ssl-internal-hash": "c448d451bf2b6b5a1ba4e65cbe37cebe",    },    OwnerReferences: nil,    Finalizers: nil,    ManagedFields: nil,    },    Spec: v1.PodSpec{    Volumes: []v1.Volume{    {Name: "tmp", VolumeSource: {EmptyDir: &{}}},    {    Name: "config",    VolumeSource: v1.VolumeSource{    ... // 16 identical fields    FC: nil,    AzureFile: nil,    ConfigMap: &v1.ConfigMapVolumeSource{    LocalObjectReference: {Name: "monitoring-pxc"},    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &true,    },    VsphereVolume: nil,    Quobyte: nil,    ... // 9 identical fields    },    },    {    Name: "ssl-internal",    VolumeSource: v1.VolumeSource{    ... 
// 3 identical fields    AWSElasticBlockStore: nil,    GitRepo: nil,    Secret: &v1.SecretVolumeSource{    SecretName: "monitoring-ssl-internal",    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &true,    },    NFS: nil,    ISCSI: nil,    ... // 22 identical fields    },    },    {    Name: "ssl",    VolumeSource: v1.VolumeSource{    ... // 3 identical fields    AWSElasticBlockStore: nil,    GitRepo: nil,    Secret: &v1.SecretVolumeSource{    SecretName: "some-name-ssl",    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &false,    },    NFS: nil,    ISCSI: nil,    ... // 22 identical fields    },    },    {    Name: "auto-config",    VolumeSource: v1.VolumeSource{    ... // 16 identical fields    FC: nil,    AzureFile: nil,    ConfigMap: &v1.ConfigMapVolumeSource{    LocalObjectReference: {Name: "auto-monitoring-pxc"},    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &true,    },    VsphereVolume: nil,    Quobyte: nil,    ... // 9 identical fields    },    },    {    Name: "vault-keyring-secret",    VolumeSource: v1.VolumeSource{    ... // 3 identical fields    AWSElasticBlockStore: nil,    GitRepo: nil,    Secret: &v1.SecretVolumeSource{    SecretName: "monitoring-vault",    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &true,    },    NFS: nil,    ISCSI: nil,    ... // 22 identical fields    },    },    {    Name: "mysql-users-secret-file",    VolumeSource: v1.VolumeSource{    ... // 3 identical fields    AWSElasticBlockStore: nil,    GitRepo: nil,    Secret: &v1.SecretVolumeSource{    SecretName: "internal-monitoring",    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &false,    },    NFS: nil,    ISCSI: nil,    ... // 22 identical fields    },    },    {    Name: "mysql-init-file",    VolumeSource: v1.VolumeSource{    ... // 3 identical fields    AWSElasticBlockStore: nil,    GitRepo: nil,    Secret: &v1.SecretVolumeSource{    SecretName: "monitoring-mysql-init",    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &true,    },    NFS: nil,    ISCSI: nil,    ... // 22 identical fields    },    },    },    InitContainers: []v1.Container{    {    ... // 16 identical fields    StartupProbe: nil,    Lifecycle: nil, -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePath: "", -  TerminationMessagePolicy: "File", +  TerminationMessagePolicy: "",    ImagePullPolicy: "Always",    SecurityContext: nil,    ... // 3 identical fields    },    },    Containers: []v1.Container{    {    ... 
// 3 identical fields    Args: nil,    WorkingDir: "",    Ports: []v1.ContainerPort{    {    Name: "",    HostPort: 0,    ContainerPort: 7777, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30100, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30101, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30102, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30103, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30104, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30105, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    },    EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}},    Env: []v1.EnvVar{    {    Name: "POD_NAME",    Value: "",    ValueFrom: &v1.EnvVarSource{    FieldRef: &v1.ObjectFieldSelector{ -  APIVersion: "v1", +  APIVersion: "",    FieldPath: "metadata.name",    },    ResourceFieldRef: nil,    ConfigMapKeyRef: nil,    ... // 2 identical fields    },    },    {    Name: "POD_NAMESPACE",    Value: "",    ValueFrom: &v1.EnvVarSource{    FieldRef: &v1.ObjectFieldSelector{ -  APIVersion: "v1", +  APIVersion: "",    FieldPath: "metadata.namespace",    },    ResourceFieldRef: nil,    ConfigMapKeyRef: nil,    ... // 2 identical fields    },    },    {Name: "PMM_AGENT_SERVER_ADDRESS", Value: "monitoring-service"},    {Name: "PMM_AGENT_SERVER_USERNAME", Value: "service_token"},    ... // 18 identical elements    {Name: "DB_HOST", Value: "localhost"},    {Name: "CLUSTER_NAME", Value: "foo-custom-cluster-name"},    {    Name: "PMM_AGENT_SETUP_NODE_NAME", -  Value: "$(POD_NAMESPACE)-$(POD_NAME)", +  Value: "$(PMM_PREFIX)$(POD_NAMESPACE)-$(POD_NAME)",    ValueFrom: nil,    },    {Name: "DB_PORT", Value: "33062"},    {Name: "DB_TYPE", Value: "mysql"},    ... // 2 identical elements    },    Resources: {Limits: {s"cpu": {i: {...}, Format: "DecimalSI"}, s"memory": {i: {...}, Format: "DecimalSI"}}, Requests: {s"cpu": {i: {...}, s: "308m", Format: "DecimalSI"}, s"memory": {i: {...}, s: "508M", Format: "DecimalSI"}}},    ResizePolicy: nil,    ... // 2 identical fields    VolumeMounts: {{Name: "datadir", MountPath: "/var/lib/mysql"}},    VolumeDevices: nil,    LivenessProbe: &v1.Probe{    ProbeHandler: v1.ProbeHandler{    Exec: nil,    HTTPGet: &v1.HTTPGetAction{    Path: "/local/Status",    Port: {IntVal: 7777},    Host: "", -  Scheme: "HTTP", +  Scheme: "",    HTTPHeaders: nil,    },    TCPSocket: nil,    GRPC: nil,    },    InitialDelaySeconds: 300,    TimeoutSeconds: 5,    ... // 4 identical fields    },    ReadinessProbe: &v1.Probe{    ProbeHandler: v1.ProbeHandler{    Exec: nil,    HTTPGet: &v1.HTTPGetAction{    Path: "/local/Status",    Port: {IntVal: 7777},    Host: "", -  Scheme: "HTTP", +  Scheme: "",    HTTPHeaders: nil,    },    TCPSocket: nil,    GRPC: nil,    },    InitialDelaySeconds: 15,    TimeoutSeconds: 15,    ... 
// 4 identical fields    },    StartupProbe: nil,    Lifecycle: &{PreStop: &{Exec: &{Command: {"bash", "-c", "pmm-admin unregister --force"}}}}, -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePath: "", -  TerminationMessagePolicy: "File", +  TerminationMessagePolicy: "",    ImagePullPolicy: "Always",    SecurityContext: nil,    ... // 3 identical fields    },    {    ... // 3 identical fields    Args: {"mysqld"},    WorkingDir: "",    Ports: []v1.ContainerPort{    {    Name: "mysql",    HostPort: 0,    ContainerPort: 3306, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "sst",    HostPort: 0,    ContainerPort: 4444, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "write-set",    HostPort: 0,    ContainerPort: 4567, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "ist",    HostPort: 0,    ContainerPort: 4568, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "mysql-admin",    HostPort: 0,    ContainerPort: 33062, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "mysqlx",    HostPort: 0,    ContainerPort: 33060, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    },    EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}},    Env: {{Name: "PXC_SERVICE", Value: "monitoring-pxc-unready"}, {Name: "MONITOR_HOST", Value: "%"}, {Name: "MYSQL_ROOT_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-monitoring"}, Key: "root"}}}, {Name: "XTRABACKUP_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-monitoring"}, Key: "xtrabackup"}}}, ...},    ... // 4 identical fields    VolumeMounts: {{Name: "datadir", MountPath: "/var/lib/mysql"}, {Name: "config", MountPath: "/etc/percona-xtradb-cluster.conf.d"}, {Name: "tmp", MountPath: "/tmp"}, {Name: "ssl", MountPath: "/etc/mysql/ssl"}, ...},    VolumeDevices: nil,    LivenessProbe: &v1.Probe{    ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/liveness-check.sh"}}},    InitialDelaySeconds: 300,    TimeoutSeconds: 5, -  PeriodSeconds: 10, +  PeriodSeconds: 0,    SuccessThreshold: 1,    FailureThreshold: 3,    TerminationGracePeriodSeconds: nil,    },    ReadinessProbe: &{ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/readiness-check.sh"}}}, InitialDelaySeconds: 15, TimeoutSeconds: 15, PeriodSeconds: 30, ...},    StartupProbe: nil,    Lifecycle: nil, -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePath: "", -  TerminationMessagePolicy: "File", +  TerminationMessagePolicy: "",    ImagePullPolicy: "Always",    SecurityContext: nil,    ... // 3 identical fields    },    },    EphemeralContainers: nil, -  RestartPolicy: "Always", +  RestartPolicy: "",    TerminationGracePeriodSeconds: &600,    ActiveDeadlineSeconds: nil, -  DNSPolicy: "ClusterFirst", +  DNSPolicy: "",    NodeSelector: nil,    ServiceAccountName: "default", -  DeprecatedServiceAccount: "default", +  DeprecatedServiceAccount: "",    AutomountServiceAccountToken: nil,    NodeName: "",    ... // 7 identical fields    Subdomain: "",    Affinity: nil, -  SchedulerName: "default-scheduler", +  SchedulerName: "",    Tolerations: nil,    HostAliases: nil,    ... // 6 identical fields    PreemptionPolicy: nil,    Overhead: nil, -  TopologySpreadConstraints: nil, +  TopologySpreadConstraints: []v1.TopologySpreadConstraint{},    SetHostnameAsFQDN: nil,    OS: nil,    ... 
// 6 identical fields    },    },    VolumeClaimTemplates: []v1.PersistentVolumeClaim{    {    TypeMeta: {},    ObjectMeta: {Name: "datadir", Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}},    Spec: v1.PersistentVolumeClaimSpec{    ... // 3 identical fields    VolumeName: "",    StorageClassName: nil, -  VolumeMode: &"Filesystem", +  VolumeMode: nil,    DataSource: nil,    DataSourceRef: nil,    VolumeAttributesClassName: nil,    },    Status: v1.PersistentVolumeClaimStatus{ -  Phase: "Pending", +  Phase: "",    AccessModes: nil,    Capacity: nil,    ... // 5 identical fields    },    },    },    ServiceName: "monitoring-pxc", -  PodManagementPolicy: "OrderedReady", +  PodManagementPolicy: "",    UpdateStrategy: {Type: "RollingUpdate", RollingUpdate: &{Partition: &0}}, -  RevisionHistoryLimit: &10, +  RevisionHistoryLimit: nil,    MinReadySeconds: 0, -  PersistentVolumeClaimRetentionPolicy: s"&StatefulSetPersistentVolumeClaimRetentionPolicy{WhenDeleted:Retain,WhenScaled:Retain,}", +  PersistentVolumeClaimRetentionPolicy: nil,    Ordinals: nil,    },    Status: v1.StatefulSetStatus{ -  ObservedGeneration: 1, +  ObservedGeneration: 0, -  Replicas: 3, +  Replicas: 0, -  ReadyReplicas: 3, +  ReadyReplicas: 0, -  CurrentReplicas: 3, +  CurrentReplicas: 0, -  UpdatedReplicas: 3, +  UpdatedReplicas: 0, -  CurrentRevision: "monitoring-pxc-5dcb897cff", +  CurrentRevision: "", -  UpdateRevision: "monitoring-pxc-5dcb897cff", +  UpdateRevision: "", -  CollisionCount: &0, +  CollisionCount: nil,    Conditions: nil, -  AvailableReplicas: 3, +  AvailableReplicas: 0,    },   } 2026-03-11T09:31:47.268Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "f7afd4a8-4524-4aec-b7d1-cdc9047b1a70", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true}   &v1.StatefulSet{    TypeMeta: {Kind: "StatefulSet", APIVersion: "apps/v1"},    ObjectMeta: v1.ObjectMeta{    ... 
// 2 identical fields    Namespace: "monitoring-pmm3-14440",    SelfLink: "", -  UID: "4dadf6c2-900d-43b7-bdd8-7567844e3192", +  UID: "", -  ResourceVersion: "1773221155033183017", +  ResourceVersion: "", -  Generation: 1, +  Generation: 0, -  CreationTimestamp: v1.Time{Time: s"2026-03-11 09:24:07 +0000 UTC"}, +  CreationTimestamp: v1.Time{},    DeletionTimestamp: nil,    DeletionGracePeriodSeconds: nil,    Labels: nil, -  Annotations: map[string]string{ -  "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIn19LCJzcGVjIjp7InZvbHVtZXMiOlt7Im5hbWUiOiJoYXByb3h5LWN1c3RvbSIsImNvbmZpZ01h"..., -  }, +  Annotations: map[string]string{ +  "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFi"..., +  },    OwnerReferences: {{APIVersion: "pxc.percona.com/v1", Kind: "PerconaXtraDBCluster", Name: "monitoring", UID: "2ac7d1af-9cac-4e40-a153-80f1aef7bb35", ...}},    Finalizers: nil, -  ManagedFields: []v1.ManagedFieldsEntry{ -  { -  Manager: "percona-xtradb-cluster-operator", -  Operation: "Update", -  APIVersion: "apps/v1", -  Time: s"2026-03-11 09:24:07 +0000 UTC", -  FieldsType: "FieldsV1", -  FieldsV1: s`{"f:metadata":{"f:annotations":{".":{},"f:percona.com/last-confi`..., -  }, -  { -  Manager: "kube-controller-manager", -  Operation: "Update", -  APIVersion: "apps/v1", -  Time: s"2026-03-11 09:25:55 +0000 UTC", -  FieldsType: "FieldsV1", -  FieldsV1: s`{"f:status":{"f:availableReplicas":{},"f:collisionCount":{},"f:c`..., -  Subresource: "status", -  }, -  }, +  ManagedFields: nil,    },    Spec: v1.StatefulSetSpec{    Replicas: &2,    Selector: &{MatchLabels: {"app.kubernetes.io/component": "haproxy", 
"app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}},    Template: v1.PodTemplateSpec{    ObjectMeta: v1.ObjectMeta{    ... // 9 identical fields    DeletionGracePeriodSeconds: nil,    Labels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...},    Annotations: map[string]string{    "kubectl.kubernetes.io/default-container": "haproxy",    "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e", +  "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0",    },    OwnerReferences: nil,    Finalizers: nil,    ManagedFields: nil,    },    Spec: v1.PodSpec{    Volumes: []v1.Volume{    {    Name: "haproxy-custom",    VolumeSource: v1.VolumeSource{    ... // 16 identical fields    FC: nil,    AzureFile: nil,    ConfigMap: &v1.ConfigMapVolumeSource{    LocalObjectReference: {Name: "monitoring-haproxy"},    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &true,    },    VsphereVolume: nil,    Quobyte: nil,    ... // 9 identical fields    },    },    {Name: "haproxy-auto", VolumeSource: {EmptyDir: &{}}},    {    Name: "mysql-users-secret-file",    VolumeSource: v1.VolumeSource{    ... // 3 identical fields    AWSElasticBlockStore: nil,    GitRepo: nil,    Secret: &v1.SecretVolumeSource{    SecretName: "internal-monitoring",    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &false,    },    NFS: nil,    ISCSI: nil,    ... // 22 identical fields    },    },    {    Name: "my-env-var-secrets",    VolumeSource: v1.VolumeSource{    ... // 3 identical fields    AWSElasticBlockStore: nil,    GitRepo: nil,    Secret: &v1.SecretVolumeSource{    SecretName: "my-env-var-secrets",    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &true,    },    NFS: nil,    ISCSI: nil,    ... // 22 identical fields    },    },    {Name: "bin", VolumeSource: {EmptyDir: &{}}},    },    InitContainers: []v1.Container{    {    ... // 16 identical fields    StartupProbe: nil,    Lifecycle: nil, -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePath: "", -  TerminationMessagePolicy: "File", +  TerminationMessagePolicy: "",    ImagePullPolicy: "Always",    SecurityContext: nil,    ... // 3 identical fields    },    {    ... // 16 identical fields    StartupProbe: nil,    Lifecycle: nil, -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePath: "", -  TerminationMessagePolicy: "File", +  TerminationMessagePolicy: "",    ImagePullPolicy: "Always",    SecurityContext: nil,    ... // 3 identical fields    },    },    Containers: []v1.Container{    {    ... 
// 3 identical fields    Args: nil,    WorkingDir: "",    Ports: []v1.ContainerPort{    {    Name: "",    HostPort: 0,    ContainerPort: 7777, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30100, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30101, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30102, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30103, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30104, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30105, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    },    EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}},    Env: []v1.EnvVar{    {    Name: "POD_NAME",    Value: "",    ValueFrom: &v1.EnvVarSource{    FieldRef: &v1.ObjectFieldSelector{ -  APIVersion: "v1", +  APIVersion: "",    FieldPath: "metadata.name",    },    ResourceFieldRef: nil,    ConfigMapKeyRef: nil,    ... // 2 identical fields    },    },    {    Name: "POD_NAMESPACE",    Value: "",    ValueFrom: &v1.EnvVarSource{    FieldRef: &v1.ObjectFieldSelector{ -  APIVersion: "v1", +  APIVersion: "",    FieldPath: "metadata.namespace",    },    ResourceFieldRef: nil,    ConfigMapKeyRef: nil,    ... // 2 identical fields    },    },    {Name: "PMM_AGENT_SERVER_ADDRESS", Value: "monitoring-service"},    {Name: "PMM_AGENT_SERVER_USERNAME", Value: "service_token"},    ... // 18 identical elements    {Name: "DB_HOST", Value: "localhost"},    {Name: "CLUSTER_NAME", Value: "foo-custom-cluster-name"},    {    Name: "PMM_AGENT_SETUP_NODE_NAME", -  Value: "$(POD_NAMESPACE)-$(POD_NAME)", +  Value: "$(PMM_PREFIX)$(POD_NAMESPACE)-$(POD_NAME)",    ValueFrom: nil,    },    {Name: "DB_TYPE", Value: "haproxy"},    {Name: "MONITOR_USER", Value: "monitor"},    ... // 3 identical elements    },    Resources: {Limits: {s"cpu": {i: {...}, Format: "DecimalSI"}, s"memory": {i: {...}, Format: "DecimalSI"}}, Requests: {s"cpu": {i: {...}, s: "308m", Format: "DecimalSI"}, s"memory": {i: {...}, s: "508M", Format: "DecimalSI"}}},    ResizePolicy: nil,    ... // 2 identical fields    VolumeMounts: {{Name: "bin", MountPath: "/var/lib/mysql"}},    VolumeDevices: nil,    LivenessProbe: &v1.Probe{    ProbeHandler: v1.ProbeHandler{    Exec: nil,    HTTPGet: &v1.HTTPGetAction{    Path: "/local/Status",    Port: {IntVal: 7777},    Host: "", -  Scheme: "HTTP", +  Scheme: "",    HTTPHeaders: nil,    },    TCPSocket: nil,    GRPC: nil,    },    InitialDelaySeconds: 300,    TimeoutSeconds: 5,    ... // 4 identical fields    },    ReadinessProbe: &v1.Probe{    ProbeHandler: v1.ProbeHandler{    Exec: nil,    HTTPGet: &v1.HTTPGetAction{    Path: "/local/Status",    Port: {IntVal: 7777},    Host: "", -  Scheme: "HTTP", +  Scheme: "",    HTTPHeaders: nil,    },    TCPSocket: nil,    GRPC: nil,    },    InitialDelaySeconds: 15,    TimeoutSeconds: 15,    ... 
// 4 identical fields    },    StartupProbe: nil,    Lifecycle: &{PreStop: &{Exec: &{Command: {"bash", "-c", "pmm-admin unregister --force"}}}}, -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePath: "", -  TerminationMessagePolicy: "File", +  TerminationMessagePolicy: "",    ImagePullPolicy: "Always",    SecurityContext: nil,    ... // 3 identical fields    },    {    ... // 3 identical fields    Args: {"haproxy"},    WorkingDir: "",    Ports: []v1.ContainerPort{    {    Name: "mysql",    HostPort: 0,    ContainerPort: 3306, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "mysql-replicas",    HostPort: 0,    ContainerPort: 3307, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "proxy-protocol",    HostPort: 0,    ContainerPort: 3309, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "mysql-admin",    HostPort: 0,    ContainerPort: 33062, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "mysqlx",    HostPort: 0,    ContainerPort: 33060, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "stats",    HostPort: 0,    ContainerPort: 8404, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    },    EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}},    Env: {{Name: "PXC_SERVICE", Value: "monitoring-pxc"}, {Name: "LIVENESS_CHECK_TIMEOUT", Value: "5"}, {Name: "READINESS_CHECK_TIMEOUT", Value: "1"}},    ... // 8 identical fields    StartupProbe: nil,    Lifecycle: nil, -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePath: "", -  TerminationMessagePolicy: "File", +  TerminationMessagePolicy: "",    ImagePullPolicy: "Always",    SecurityContext: nil,    ... // 3 identical fields    },    {    ... // 16 identical fields    StartupProbe: nil,    Lifecycle: nil, -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePath: "", -  TerminationMessagePolicy: "File", +  TerminationMessagePolicy: "",    ImagePullPolicy: "Always",    SecurityContext: nil,    ... // 3 identical fields    },    },    EphemeralContainers: nil, -  RestartPolicy: "Always", +  RestartPolicy: "",    TerminationGracePeriodSeconds: &30,    ActiveDeadlineSeconds: nil, -  DNSPolicy: "ClusterFirst", +  DNSPolicy: "",    NodeSelector: nil,    ServiceAccountName: "default", -  DeprecatedServiceAccount: "default", +  DeprecatedServiceAccount: "",    AutomountServiceAccountToken: nil,    NodeName: "",    ... // 2 identical fields    HostIPC: false,    ShareProcessNamespace: nil, -  SecurityContext: s"&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,AppArmorProfile:nil,SupplementalGroupsPolicy:nil,SELinux"..., +  SecurityContext: nil,    ImagePullSecrets: nil,    Hostname: "",    Subdomain: "",    Affinity: nil, -  SchedulerName: "default-scheduler", +  SchedulerName: "",    Tolerations: nil,    HostAliases: nil,    ... // 6 identical fields    PreemptionPolicy: nil,    Overhead: nil, -  TopologySpreadConstraints: nil, +  TopologySpreadConstraints: []v1.TopologySpreadConstraint{},    SetHostnameAsFQDN: nil,    OS: nil,    ... 
// 6 identical fields    },    },    VolumeClaimTemplates: nil,    ServiceName: "monitoring-haproxy", -  PodManagementPolicy: "OrderedReady", +  PodManagementPolicy: "",    UpdateStrategy: {Type: "RollingUpdate", RollingUpdate: &{Partition: &0}}, -  RevisionHistoryLimit: &10, +  RevisionHistoryLimit: nil,    MinReadySeconds: 0, -  PersistentVolumeClaimRetentionPolicy: s"&StatefulSetPersistentVolumeClaimRetentionPolicy{WhenDeleted:Retain,WhenScaled:Retain,}", +  PersistentVolumeClaimRetentionPolicy: nil,    Ordinals: nil,    },    Status: v1.StatefulSetStatus{ -  ObservedGeneration: 1, +  ObservedGeneration: 0, -  Replicas: 2, +  Replicas: 0, -  ReadyReplicas: 2, +  ReadyReplicas: 0, -  CurrentReplicas: 2, +  CurrentReplicas: 0, -  UpdatedReplicas: 2, +  UpdatedReplicas: 0, -  CurrentRevision: "monitoring-haproxy-7bd9d8dfc6", +  CurrentRevision: "", -  UpdateRevision: "monitoring-haproxy-7bd9d8dfc6", +  UpdateRevision: "", -  CollisionCount: &0, +  CollisionCount: nil,    Conditions: nil, -  AvailableReplicas: 2, +  AvailableReplicas: 0,    },   } 2026-03-11T09:31:47.362Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "f7afd4a8-4524-4aec-b7d1-cdc9047b1a70", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true}   &v1.StatefulSet{    TypeMeta: {Kind: "StatefulSet", APIVersion: "apps/v1"},    ObjectMeta: v1.ObjectMeta{    ... // 2 identical fields    Namespace: "monitoring-pmm3-14440",    SelfLink: "", -  UID: "4dadf6c2-900d-43b7-bdd8-7567844e3192", +  UID: "", -  ResourceVersion: "1773221155033183017", +  ResourceVersion: "", -  Generation: 1, +  Generation: 0, -  CreationTimestamp: v1.Time{Time: s"2026-03-11 09:24:07 +0000 UTC"}, +  CreationTimestamp: v1.Time{},    DeletionTimestamp: nil,    DeletionGracePeriodSeconds: nil,    Labels: nil, -  Annotations: map[string]string{ -  "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIn19LCJzcGVjIjp7InZvbHVtZXMiOlt7Im5hbWUiOiJoYXByb3h5LWN1c3RvbSIsImNvbmZpZ01h"..., -  }, +  Annotations: map[string]string{ +  "percona.com/last-config-hash": 
"eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFi"..., +  },    OwnerReferences: {{APIVersion: "pxc.percona.com/v1", Kind: "PerconaXtraDBCluster", Name: "monitoring", UID: "2ac7d1af-9cac-4e40-a153-80f1aef7bb35", ...}},    Finalizers: nil, -  ManagedFields: []v1.ManagedFieldsEntry{ -  { -  Manager: "percona-xtradb-cluster-operator", -  Operation: "Update", -  APIVersion: "apps/v1", -  Time: s"2026-03-11 09:24:07 +0000 UTC", -  FieldsType: "FieldsV1", -  FieldsV1: s`{"f:metadata":{"f:annotations":{".":{},"f:percona.com/last-confi`..., -  }, -  { -  Manager: "kube-controller-manager", -  Operation: "Update", -  APIVersion: "apps/v1", -  Time: s"2026-03-11 09:25:55 +0000 UTC", -  FieldsType: "FieldsV1", -  FieldsV1: s`{"f:status":{"f:availableReplicas":{},"f:collisionCount":{},"f:c`..., -  Subresource: "status", -  }, -  }, +  ManagedFields: nil,    },    Spec: v1.StatefulSetSpec{    Replicas: &2,    Selector: &{MatchLabels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}},    Template: v1.PodTemplateSpec{    ObjectMeta: v1.ObjectMeta{    ... // 9 identical fields    DeletionGracePeriodSeconds: nil,    Labels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...},    Annotations: map[string]string{    "kubectl.kubernetes.io/default-container": "haproxy",    "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e", +  "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0",    },    OwnerReferences: nil,    Finalizers: nil,    ManagedFields: nil,    },    Spec: v1.PodSpec{    Volumes: []v1.Volume{    {    Name: "haproxy-custom",    VolumeSource: v1.VolumeSource{    ... // 16 identical fields    FC: nil,    AzureFile: nil,    ConfigMap: &v1.ConfigMapVolumeSource{    LocalObjectReference: {Name: "monitoring-haproxy"},    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &true,    },    VsphereVolume: nil,    Quobyte: nil,    ... // 9 identical fields    },    },    {Name: "haproxy-auto", VolumeSource: {EmptyDir: &{}}},    {    Name: "mysql-users-secret-file",    VolumeSource: v1.VolumeSource{    ... 
// 3 identical fields    AWSElasticBlockStore: nil,    GitRepo: nil,    Secret: &v1.SecretVolumeSource{    SecretName: "internal-monitoring",    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &false,    },    NFS: nil,    ISCSI: nil,    ... // 22 identical fields    },    },    {    Name: "my-env-var-secrets",    VolumeSource: v1.VolumeSource{    ... // 3 identical fields    AWSElasticBlockStore: nil,    GitRepo: nil,    Secret: &v1.SecretVolumeSource{    SecretName: "my-env-var-secrets",    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &true,    },    NFS: nil,    ISCSI: nil,    ... // 22 identical fields    },    },    {Name: "bin", VolumeSource: {EmptyDir: &{}}},    },    InitContainers: []v1.Container{    {    ... // 16 identical fields    StartupProbe: nil,    Lifecycle: nil, -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePath: "", -  TerminationMessagePolicy: "File", +  TerminationMessagePolicy: "",    ImagePullPolicy: "Always",    SecurityContext: nil,    ... // 3 identical fields    },    {    ... // 16 identical fields    StartupProbe: nil,    Lifecycle: nil, -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePath: "", -  TerminationMessagePolicy: "File", +  TerminationMessagePolicy: "",    ImagePullPolicy: "Always",    SecurityContext: nil,    ... // 3 identical fields    },    },    Containers: []v1.Container{    {    ... // 3 identical fields    Args: nil,    WorkingDir: "",    Ports: []v1.ContainerPort{    {    Name: "",    HostPort: 0,    ContainerPort: 7777, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30100, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30101, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30102, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30103, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30104, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30105, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    },    EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}},    Env: []v1.EnvVar{    {    Name: "POD_NAME",    Value: "",    ValueFrom: &v1.EnvVarSource{    FieldRef: &v1.ObjectFieldSelector{ -  APIVersion: "v1", +  APIVersion: "",    FieldPath: "metadata.name",    },    ResourceFieldRef: nil,    ConfigMapKeyRef: nil,    ... // 2 identical fields    },    },    {    Name: "POD_NAMESPACE",    Value: "",    ValueFrom: &v1.EnvVarSource{    FieldRef: &v1.ObjectFieldSelector{ -  APIVersion: "v1", +  APIVersion: "",    FieldPath: "metadata.namespace",    },    ResourceFieldRef: nil,    ConfigMapKeyRef: nil,    ... // 2 identical fields    },    },    {Name: "PMM_AGENT_SERVER_ADDRESS", Value: "monitoring-service"},    {Name: "PMM_AGENT_SERVER_USERNAME", Value: "service_token"},    ... 
// 18 identical elements    {Name: "DB_HOST", Value: "localhost"},    {Name: "CLUSTER_NAME", Value: "foo-custom-cluster-name"},    {    Name: "PMM_AGENT_SETUP_NODE_NAME", -  Value: "$(POD_NAMESPACE)-$(POD_NAME)", +  Value: "$(PMM_PREFIX)$(POD_NAMESPACE)-$(POD_NAME)",    ValueFrom: nil,    },    {Name: "DB_TYPE", Value: "haproxy"},    {Name: "MONITOR_USER", Value: "monitor"},    ... // 3 identical elements    },    Resources: {Limits: {s"cpu": {i: {...}, Format: "DecimalSI"}, s"memory": {i: {...}, Format: "DecimalSI"}}, Requests: {s"cpu": {i: {...}, s: "308m", Format: "DecimalSI"}, s"memory": {i: {...}, s: "508M", Format: "DecimalSI"}}},    ResizePolicy: nil,    ... // 2 identical fields    VolumeMounts: {{Name: "bin", MountPath: "/var/lib/mysql"}},    VolumeDevices: nil,    LivenessProbe: &v1.Probe{    ProbeHandler: v1.ProbeHandler{    Exec: nil,    HTTPGet: &v1.HTTPGetAction{    Path: "/local/Status",    Port: {IntVal: 7777},    Host: "", -  Scheme: "HTTP", +  Scheme: "",    HTTPHeaders: nil,    },    TCPSocket: nil,    GRPC: nil,    },    InitialDelaySeconds: 300,    TimeoutSeconds: 5,    ... // 4 identical fields    },    ReadinessProbe: &v1.Probe{    ProbeHandler: v1.ProbeHandler{    Exec: nil,    HTTPGet: &v1.HTTPGetAction{    Path: "/local/Status",    Port: {IntVal: 7777},    Host: "", -  Scheme: "HTTP", +  Scheme: "",    HTTPHeaders: nil,    },    TCPSocket: nil,    GRPC: nil,    },    InitialDelaySeconds: 15,    TimeoutSeconds: 15,    ... // 4 identical fields    },    StartupProbe: nil,    Lifecycle: &{PreStop: &{Exec: &{Command: {"bash", "-c", "pmm-admin unregister --force"}}}}, -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePath: "", -  TerminationMessagePolicy: "File", +  TerminationMessagePolicy: "",    ImagePullPolicy: "Always",    SecurityContext: nil,    ... // 3 identical fields    },    {    ... // 3 identical fields    Args: {"haproxy"},    WorkingDir: "",    Ports: []v1.ContainerPort{    {    Name: "mysql",    HostPort: 0,    ContainerPort: 3306, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "mysql-replicas",    HostPort: 0,    ContainerPort: 3307, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "proxy-protocol",    HostPort: 0,    ContainerPort: 3309, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "mysql-admin",    HostPort: 0,    ContainerPort: 33062, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "mysqlx",    HostPort: 0,    ContainerPort: 33060, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "stats",    HostPort: 0,    ContainerPort: 8404, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    },    EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}},    Env: {{Name: "PXC_SERVICE", Value: "monitoring-pxc"}, {Name: "LIVENESS_CHECK_TIMEOUT", Value: "5"}, {Name: "READINESS_CHECK_TIMEOUT", Value: "1"}},    ... // 8 identical fields    StartupProbe: nil,    Lifecycle: nil, -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePath: "", -  TerminationMessagePolicy: "File", +  TerminationMessagePolicy: "",    ImagePullPolicy: "Always",    SecurityContext: nil,    ... // 3 identical fields    },    {    ... 
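The one substantive container change in this diff is PMM_AGENT_SETUP_NODE_NAME gaining a $(PMM_PREFIX) prefix. The $(VAR) syntax is standard Kubernetes dependent-variable expansion: the kubelet resolves each reference against variables already defined for the container (POD_NAME and POD_NAMESPACE come from the downward API above; PMM_PREFIX is presumably supplied through the my-env-var-secrets envFrom). A sketch of the relevant env entries, with names and values copied from the diff:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func pmmEnv() []corev1.EnvVar {
	return []corev1.EnvVar{
		// Downward-API fields, as in the diff above.
		{Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{
			FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"},
		}},
		{Name: "POD_NAMESPACE", ValueFrom: &corev1.EnvVarSource{
			FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"},
		}},
		// $(VAR) references are expanded at container start; a reference that
		// cannot be resolved is left as the literal string.
		{Name: "PMM_AGENT_SETUP_NODE_NAME",
			Value: "$(PMM_PREFIX)$(POD_NAMESPACE)-$(POD_NAME)"},
	}
}

func main() { fmt.Println(pmmEnv()) }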
// 16 identical fields    StartupProbe: nil,    Lifecycle: nil, -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePath: "", -  TerminationMessagePolicy: "File", +  TerminationMessagePolicy: "",    ImagePullPolicy: "Always",    SecurityContext: nil,    ... // 3 identical fields    },    },    EphemeralContainers: nil, -  RestartPolicy: "Always", +  RestartPolicy: "",    TerminationGracePeriodSeconds: &30,    ActiveDeadlineSeconds: nil, -  DNSPolicy: "ClusterFirst", +  DNSPolicy: "",    NodeSelector: nil,    ServiceAccountName: "default", -  DeprecatedServiceAccount: "default", +  DeprecatedServiceAccount: "",    AutomountServiceAccountToken: nil,    NodeName: "",    ... // 2 identical fields    HostIPC: false,    ShareProcessNamespace: nil, -  SecurityContext: s"&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,AppArmorProfile:nil,SupplementalGroupsPolicy:nil,SELinux"..., +  SecurityContext: nil,    ImagePullSecrets: nil,    Hostname: "",    Subdomain: "",    Affinity: nil, -  SchedulerName: "default-scheduler", +  SchedulerName: "",    Tolerations: nil,    HostAliases: nil,    ... // 6 identical fields    PreemptionPolicy: nil,    Overhead: nil, -  TopologySpreadConstraints: nil, +  TopologySpreadConstraints: []v1.TopologySpreadConstraint{},    SetHostnameAsFQDN: nil,    OS: nil,    ... // 6 identical fields    },    },    VolumeClaimTemplates: nil,    ServiceName: "monitoring-haproxy", -  PodManagementPolicy: "OrderedReady", +  PodManagementPolicy: "",    UpdateStrategy: {Type: "RollingUpdate", RollingUpdate: &{Partition: &0}}, -  RevisionHistoryLimit: &10, +  RevisionHistoryLimit: nil,    MinReadySeconds: 0, -  PersistentVolumeClaimRetentionPolicy: s"&StatefulSetPersistentVolumeClaimRetentionPolicy{WhenDeleted:Retain,WhenScaled:Retain,}", +  PersistentVolumeClaimRetentionPolicy: nil,    Ordinals: nil,    },    Status: v1.StatefulSetStatus{ -  ObservedGeneration: 1, +  ObservedGeneration: 0, -  Replicas: 2, +  Replicas: 0, -  ReadyReplicas: 2, +  ReadyReplicas: 0, -  CurrentReplicas: 2, +  CurrentReplicas: 0, -  UpdatedReplicas: 2, +  UpdatedReplicas: 0, -  CurrentRevision: "monitoring-haproxy-7bd9d8dfc6", +  CurrentRevision: "", -  UpdateRevision: "monitoring-haproxy-7bd9d8dfc6", +  UpdateRevision: "", -  CollisionCount: &0, +  CollisionCount: nil,    Conditions: nil, -  AvailableReplicas: 2, +  AvailableReplicas: 0,    },   } 2026-03-11T09:32:50.600Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "8a11f41b-725c-4062-818c-5a667bc92897", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-1: dial tcp 10.182.194.32:33062: connect: connection refused"} 2026-03-11T09:33:48.157Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "bc04e7da-74f1-4439-ac2f-4915e5ce2fc3", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-0: dial tcp 10.182.192.43:33062: connect: 
connection refused"} 2026-03-11T09:34:30.005Z INFO Password changed, updating user {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "789e6f91-d277-4804-8df3-9e30dd162168", "user": "pmmservertoken"} 2026-03-11T09:34:30.025Z INFO Internal secrets updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "789e6f91-d277-4804-8df3-9e30dd162168", "user": "pmmservertoken"} 2026-03-11T09:34:30.025Z INFO PXC pods will be restarted {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "789e6f91-d277-4804-8df3-9e30dd162168", "last-applied-secret": "6c1a70bee22152a7908f2853108e6ea5a343c2e44b1076110abde03c4a82f874"} 2026-03-11T09:34:30.025Z INFO HAProxy pods will be restarted {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "789e6f91-d277-4804-8df3-9e30dd162168", "last-applied-secret": "6c1a70bee22152a7908f2853108e6ea5a343c2e44b1076110abde03c4a82f874"} 2026-03-11T09:34:30.027Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "789e6f91-d277-4804-8df3-9e30dd162168", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true}   &v1.StatefulSet{    TypeMeta: {Kind: "StatefulSet", APIVersion: "apps/v1"},    ObjectMeta: v1.ObjectMeta{    ... 
// 2 identical fields    Namespace: "monitoring-pmm3-14440",    SelfLink: "", -  UID: "80339a3a-7837-48c5-aae0-4776b8bdda2f", +  UID: "", -  ResourceVersion: "1773221667794479006", +  ResourceVersion: "", -  Generation: 2, +  Generation: 0, -  CreationTimestamp: v1.Time{Time: s"2026-03-11 09:24:07 +0000 UTC"}, +  CreationTimestamp: v1.Time{},    DeletionTimestamp: nil,    DeletionGracePeriodSeconds: nil,    Labels: nil, -  Annotations: map[string]string{ -  "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFiYzcwNGYwMmU3NDg3"..., -  }, +  Annotations: map[string]string{ +  "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsImxhc3QtYXBwbGllZC1zZWNyZXQiOiI2YzFhNzBiZWUyMjE1MmE3OTA4ZjI4NTMxMDhlNmVhNWEzNDNjMmU0NGIxMDc2MTEwYWJkZTAzYzRhODJmODc0IiwicGVyY29uYS5jb20vY29uZmlndXJhdGlvbi1oYXNoIjoiZDQxZDhjZDk4ZjAw"..., +  },    OwnerReferences: {{APIVersion: "pxc.percona.com/v1", Kind: "PerconaXtraDBCluster", Name: "monitoring", UID: "2ac7d1af-9cac-4e40-a153-80f1aef7bb35", ...}},    Finalizers: nil, -  ManagedFields: []v1.ManagedFieldsEntry{ -  { -  Manager: "percona-xtradb-cluster-operator", -  Operation: "Update", -  APIVersion: "apps/v1", -  Time: s"2026-03-11 09:31:47 +0000 UTC", -  FieldsType: "FieldsV1", -  FieldsV1: s`{"f:metadata":{"f:annotations":{".":{},"f:percona.com/last-confi`..., -  }, -  { -  Manager: "kube-controller-manager", -  Operation: "Update", -  APIVersion: "apps/v1", -  Time: s"2026-03-11 09:34:27 +0000 UTC", -  FieldsType: "FieldsV1", -  FieldsV1: s`{"f:status":{"f:availableReplicas":{},"f:collisionCount":{},"f:c`..., -  Subresource: "status", -  }, -  }, +  ManagedFields: nil,    },    Spec: v1.StatefulSetSpec{    Replicas: &3,    Selector: &{MatchLabels: {"app.kubernetes.io/component": "pxc", 
"app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}},    Template: v1.PodTemplateSpec{    ObjectMeta: v1.ObjectMeta{    ... // 9 identical fields    DeletionGracePeriodSeconds: nil,    Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...},    Annotations: map[string]string{    "kubectl.kubernetes.io/default-container": "pxc", +  "last-applied-secret": "6c1a70bee22152a7908f2853108e6ea5a343c2e44b1076110abde03c4a82f874",    "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e",    "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0",    ... // 2 identical entries    },    OwnerReferences: nil,    Finalizers: nil,    ManagedFields: nil,    },    Spec: v1.PodSpec{    Volumes: []v1.Volume{    {Name: "tmp", VolumeSource: {EmptyDir: &{}}},    {    Name: "config",    VolumeSource: v1.VolumeSource{    ... // 16 identical fields    FC: nil,    AzureFile: nil,    ConfigMap: &v1.ConfigMapVolumeSource{    LocalObjectReference: {Name: "monitoring-pxc"},    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &true,    },    VsphereVolume: nil,    Quobyte: nil,    ... // 9 identical fields    },    },    {    Name: "ssl-internal",    VolumeSource: v1.VolumeSource{    ... // 3 identical fields    AWSElasticBlockStore: nil,    GitRepo: nil,    Secret: &v1.SecretVolumeSource{    SecretName: "monitoring-ssl-internal",    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &true,    },    NFS: nil,    ISCSI: nil,    ... // 22 identical fields    },    },    {    Name: "ssl",    VolumeSource: v1.VolumeSource{    ... // 3 identical fields    AWSElasticBlockStore: nil,    GitRepo: nil,    Secret: &v1.SecretVolumeSource{    SecretName: "some-name-ssl",    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &false,    },    NFS: nil,    ISCSI: nil,    ... // 22 identical fields    },    },    {    Name: "auto-config",    VolumeSource: v1.VolumeSource{    ... // 16 identical fields    FC: nil,    AzureFile: nil,    ConfigMap: &v1.ConfigMapVolumeSource{    LocalObjectReference: {Name: "auto-monitoring-pxc"},    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &true,    },    VsphereVolume: nil,    Quobyte: nil,    ... // 9 identical fields    },    },    {    Name: "vault-keyring-secret",    VolumeSource: v1.VolumeSource{    ... // 3 identical fields    AWSElasticBlockStore: nil,    GitRepo: nil,    Secret: &v1.SecretVolumeSource{    SecretName: "monitoring-vault",    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &true,    },    NFS: nil,    ISCSI: nil,    ... // 22 identical fields    },    },    {    Name: "mysql-users-secret-file",    VolumeSource: v1.VolumeSource{    ... // 3 identical fields    AWSElasticBlockStore: nil,    GitRepo: nil,    Secret: &v1.SecretVolumeSource{    SecretName: "internal-monitoring",    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &false,    },    NFS: nil,    ISCSI: nil,    ... // 22 identical fields    },    },    {    Name: "mysql-init-file",    VolumeSource: v1.VolumeSource{    ... 
// 3 identical fields    AWSElasticBlockStore: nil,    GitRepo: nil,    Secret: &v1.SecretVolumeSource{    SecretName: "monitoring-mysql-init",    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &true,    },    NFS: nil,    ISCSI: nil,    ... // 22 identical fields    },    },    },    InitContainers: []v1.Container{    {    ... // 16 identical fields    StartupProbe: nil,    Lifecycle: nil, -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePath: "", -  TerminationMessagePolicy: "File", +  TerminationMessagePolicy: "",    ImagePullPolicy: "Always",    SecurityContext: nil,    ... // 3 identical fields    },    },    Containers: []v1.Container{    {    ... // 3 identical fields    Args: nil,    WorkingDir: "",    Ports: []v1.ContainerPort{    {    Name: "",    HostPort: 0,    ContainerPort: 7777, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30100, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30101, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30102, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30103, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30104, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30105, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    },    EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}},    Env: []v1.EnvVar{    {    Name: "POD_NAME",    Value: "",    ValueFrom: &v1.EnvVarSource{    FieldRef: &v1.ObjectFieldSelector{ -  APIVersion: "v1", +  APIVersion: "",    FieldPath: "metadata.name",    },    ResourceFieldRef: nil,    ConfigMapKeyRef: nil,    ... // 2 identical fields    },    },    {    Name: "POD_NAMESPACE",    Value: "",    ValueFrom: &v1.EnvVarSource{    FieldRef: &v1.ObjectFieldSelector{ -  APIVersion: "v1", +  APIVersion: "",    FieldPath: "metadata.namespace",    },    ResourceFieldRef: nil,    ConfigMapKeyRef: nil,    ... // 2 identical fields    },    },    {Name: "PMM_AGENT_SERVER_ADDRESS", Value: "monitoring-service"},    {Name: "PMM_AGENT_SERVER_USERNAME", Value: "service_token"},    ... // 25 identical elements    },    Resources: {Limits: {s"cpu": {i: {...}, Format: "DecimalSI"}, s"memory": {i: {...}, Format: "DecimalSI"}}, Requests: {s"cpu": {i: {...}, s: "308m", Format: "DecimalSI"}, s"memory": {i: {...}, s: "508M", Format: "DecimalSI"}}},    ResizePolicy: nil,    ... // 2 identical fields    VolumeMounts: {{Name: "datadir", MountPath: "/var/lib/mysql"}},    VolumeDevices: nil,    LivenessProbe: &v1.Probe{    ProbeHandler: v1.ProbeHandler{    Exec: nil,    HTTPGet: &v1.HTTPGetAction{    Path: "/local/Status",    Port: {IntVal: 7777},    Host: "", -  Scheme: "HTTP", +  Scheme: "",    HTTPHeaders: nil,    },    TCPSocket: nil,    GRPC: nil,    },    InitialDelaySeconds: 300,    TimeoutSeconds: 5,    ... // 4 identical fields    },    ReadinessProbe: &v1.Probe{    ProbeHandler: v1.ProbeHandler{    Exec: nil,    HTTPGet: &v1.HTTPGetAction{    Path: "/local/Status",    Port: {IntVal: 7777},    Host: "", -  Scheme: "HTTP", +  Scheme: "",    HTTPHeaders: nil,    },    TCPSocket: nil,    GRPC: nil,    },    InitialDelaySeconds: 15,    TimeoutSeconds: 15,    ... 
// 4 identical fields    },    StartupProbe: nil,    Lifecycle: &{PreStop: &{Exec: &{Command: {"bash", "-c", "pmm-admin unregister --force"}}}}, -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePath: "", -  TerminationMessagePolicy: "File", +  TerminationMessagePolicy: "",    ImagePullPolicy: "Always",    SecurityContext: nil,    ... // 3 identical fields    },    {    ... // 3 identical fields    Args: {"mysqld"},    WorkingDir: "",    Ports: []v1.ContainerPort{    {    Name: "mysql",    HostPort: 0,    ContainerPort: 3306, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "sst",    HostPort: 0,    ContainerPort: 4444, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "write-set",    HostPort: 0,    ContainerPort: 4567, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "ist",    HostPort: 0,    ContainerPort: 4568, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "mysql-admin",    HostPort: 0,    ContainerPort: 33062, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "mysqlx",    HostPort: 0,    ContainerPort: 33060, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    },    EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}},    Env: {{Name: "PXC_SERVICE", Value: "monitoring-pxc-unready"}, {Name: "MONITOR_HOST", Value: "%"}, {Name: "MYSQL_ROOT_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-monitoring"}, Key: "root"}}}, {Name: "XTRABACKUP_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-monitoring"}, Key: "xtrabackup"}}}, ...},    ... // 4 identical fields    VolumeMounts: {{Name: "datadir", MountPath: "/var/lib/mysql"}, {Name: "config", MountPath: "/etc/percona-xtradb-cluster.conf.d"}, {Name: "tmp", MountPath: "/tmp"}, {Name: "ssl", MountPath: "/etc/mysql/ssl"}, ...},    VolumeDevices: nil,    LivenessProbe: &v1.Probe{    ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/liveness-check.sh"}}},    InitialDelaySeconds: 300,    TimeoutSeconds: 5, -  PeriodSeconds: 10, +  PeriodSeconds: 0,    SuccessThreshold: 1,    FailureThreshold: 3,    TerminationGracePeriodSeconds: nil,    },    ReadinessProbe: &{ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/readiness-check.sh"}}}, InitialDelaySeconds: 15, TimeoutSeconds: 15, PeriodSeconds: 30, ...},    StartupProbe: nil,    Lifecycle: nil, -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePath: "", -  TerminationMessagePolicy: "File", +  TerminationMessagePolicy: "",    ImagePullPolicy: "Always",    SecurityContext: nil,    ... // 3 identical fields    },    },    EphemeralContainers: nil, -  RestartPolicy: "Always", +  RestartPolicy: "",    TerminationGracePeriodSeconds: &600,    ActiveDeadlineSeconds: nil, -  DNSPolicy: "ClusterFirst", +  DNSPolicy: "",    NodeSelector: nil,    ServiceAccountName: "default", -  DeprecatedServiceAccount: "default", +  DeprecatedServiceAccount: "",    AutomountServiceAccountToken: nil,    NodeName: "",    ... // 7 identical fields    Subdomain: "",    Affinity: nil, -  SchedulerName: "default-scheduler", +  SchedulerName: "",    Tolerations: nil,    HostAliases: nil,    ... // 6 identical fields    PreemptionPolicy: nil,    Overhead: nil, -  TopologySpreadConstraints: nil, +  TopologySpreadConstraints: []v1.TopologySpreadConstraint{},    SetHostnameAsFQDN: nil,    OS: nil,    ... 
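The PMM sidecar's health checking and teardown are both visible in this diff: HTTP probes against the pmm-agent local API at /local/Status on port 7777 (the first entry in the Ports list above), and a preStop hook that unregisters the agent so the node does not linger in PMM after the pod goes away. Reconstructed from the values in the log:

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// Probe and lifecycle settings copied from the diff entries above.
var pmmLiveness = &corev1.Probe{
	ProbeHandler: corev1.ProbeHandler{
		HTTPGet: &corev1.HTTPGetAction{
			Path: "/local/Status",        // pmm-agent local status endpoint
			Port: intstr.FromInt32(7777), // agent listen port
		},
	},
	InitialDelaySeconds: 300,
	TimeoutSeconds:      5,
}

var pmmPreStop = &corev1.Lifecycle{
	PreStop: &corev1.LifecycleHandler{
		Exec: &corev1.ExecAction{
			Command: []string{"bash", "-c", "pmm-admin unregister --force"},
		},
	},
}

func main() { _, _ = pmmLiveness, pmmPreStop }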
// 6 identical fields    },    },    VolumeClaimTemplates: []v1.PersistentVolumeClaim{    {    TypeMeta: {},    ObjectMeta: {Name: "datadir", Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}},    Spec: v1.PersistentVolumeClaimSpec{    ... // 3 identical fields    VolumeName: "",    StorageClassName: nil, -  VolumeMode: &"Filesystem", +  VolumeMode: nil,    DataSource: nil,    DataSourceRef: nil,    VolumeAttributesClassName: nil,    },    Status: v1.PersistentVolumeClaimStatus{ -  Phase: "Pending", +  Phase: "",    AccessModes: nil,    Capacity: nil,    ... // 5 identical fields    },    },    },    ServiceName: "monitoring-pxc", -  PodManagementPolicy: "OrderedReady", +  PodManagementPolicy: "",    UpdateStrategy: {Type: "RollingUpdate", RollingUpdate: &{Partition: &0}}, -  RevisionHistoryLimit: &10, +  RevisionHistoryLimit: nil,    MinReadySeconds: 0, -  PersistentVolumeClaimRetentionPolicy: s"&StatefulSetPersistentVolumeClaimRetentionPolicy{WhenDeleted:Retain,WhenScaled:Retain,}", +  PersistentVolumeClaimRetentionPolicy: nil,    Ordinals: nil,    },    Status: v1.StatefulSetStatus{ -  ObservedGeneration: 2, +  ObservedGeneration: 0, -  Replicas: 3, +  Replicas: 0, -  ReadyReplicas: 3, +  ReadyReplicas: 0, -  CurrentReplicas: 3, +  CurrentReplicas: 0, -  UpdatedReplicas: 3, +  UpdatedReplicas: 0, -  CurrentRevision: "monitoring-pxc-6d5745c55f", +  CurrentRevision: "", -  UpdateRevision: "monitoring-pxc-6d5745c55f", +  UpdateRevision: "", -  CollisionCount: &0, +  CollisionCount: nil,    Conditions: nil, -  AvailableReplicas: 3, +  AvailableReplicas: 0,    },   } 2026-03-11T09:34:30.080Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "789e6f91-d277-4804-8df3-9e30dd162168", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true}   &v1.StatefulSet{    TypeMeta: {Kind: "StatefulSet", APIVersion: "apps/v1"},    ObjectMeta: v1.ObjectMeta{    ... 
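Every diff in this log ends the same way: UID, ResourceVersion, Generation, CreationTimestamp, ManagedFields, and the whole Status block "change" to zero values. That is the other face of the defaulting noise noted earlier: the desired object is built from scratch and never carries server-populated fields, so they appear as deletions even though the update will not touch them. A hypothetical helper with this shape would silence that noise before diffing:

package main

import (
	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// stripServerFields zeroes the fields the API server owns, so that a diff
// against a freshly built desired object shows only intended changes.
// (Illustrative helper, not taken from the operator's source.)
func stripServerFields(sts *appsv1.StatefulSet) {
	sts.UID = ""
	sts.ResourceVersion = ""
	sts.Generation = 0
	sts.CreationTimestamp = metav1.Time{}
	sts.ManagedFields = nil
	sts.Status = appsv1.StatefulSetStatus{}
}

func main() { stripServerFields(&appsv1.StatefulSet{}) }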
// 2 identical fields    Namespace: "monitoring-pmm3-14440",    SelfLink: "", -  UID: "4dadf6c2-900d-43b7-bdd8-7567844e3192", +  UID: "", -  ResourceVersion: "1773221556410687017", +  ResourceVersion: "", -  Generation: 2, +  Generation: 0, -  CreationTimestamp: v1.Time{Time: s"2026-03-11 09:24:07 +0000 UTC"}, +  CreationTimestamp: v1.Time{},    DeletionTimestamp: nil,    DeletionGracePeriodSeconds: nil,    Labels: nil, -  Annotations: map[string]string{ -  "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFi"..., -  }, +  Annotations: map[string]string{ +  "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsImxhc3QtYXBwbGllZC1zZWNyZXQiOiI2YzFhNzBiZWUyMjE1MmE3OTA4ZjI4NTMxMDhlNmVhNWEzNDNjMmU0NGIxMDc2MTEwYWJkZTAzYzRhODJmODc0IiwicGVyY29uYS5jb20vY29uZmlndXJhdGlvbi1oYXNoIjoi"..., +  },    OwnerReferences: {{APIVersion: "pxc.percona.com/v1", Kind: "PerconaXtraDBCluster", Name: "monitoring", UID: "2ac7d1af-9cac-4e40-a153-80f1aef7bb35", ...}},    Finalizers: nil, -  ManagedFields: []v1.ManagedFieldsEntry{ -  { -  Manager: "percona-xtradb-cluster-operator", -  Operation: "Update", -  APIVersion: "apps/v1", -  Time: s"2026-03-11 09:31:47 +0000 UTC", -  FieldsType: "FieldsV1", -  FieldsV1: s`{"f:metadata":{"f:annotations":{".":{},"f:percona.com/last-confi`..., -  }, -  { -  Manager: "kube-controller-manager", -  Operation: "Update", -  APIVersion: "apps/v1", -  Time: s"2026-03-11 09:32:36 +0000 UTC", -  FieldsType: "FieldsV1", -  FieldsV1: s`{"f:status":{"f:availableReplicas":{},"f:collisionCount":{},"f:c`..., -  Subresource: "status", -  }, -  }, +  ManagedFields: nil,    },    Spec: v1.StatefulSetSpec{    Replicas: &2,    Selector: &{MatchLabels: {"app.kubernetes.io/component": "haproxy", 
"app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}},    Template: v1.PodTemplateSpec{    ObjectMeta: v1.ObjectMeta{    ... // 9 identical fields    DeletionGracePeriodSeconds: nil,    Labels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...},    Annotations: map[string]string{    "kubectl.kubernetes.io/default-container": "haproxy", +  "last-applied-secret": "6c1a70bee22152a7908f2853108e6ea5a343c2e44b1076110abde03c4a82f874",    "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e",    "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0",    },    OwnerReferences: nil,    Finalizers: nil,    ManagedFields: nil,    },    Spec: v1.PodSpec{    Volumes: []v1.Volume{    {    Name: "haproxy-custom",    VolumeSource: v1.VolumeSource{    ... // 16 identical fields    FC: nil,    AzureFile: nil,    ConfigMap: &v1.ConfigMapVolumeSource{    LocalObjectReference: {Name: "monitoring-haproxy"},    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &true,    },    VsphereVolume: nil,    Quobyte: nil,    ... // 9 identical fields    },    },    {Name: "haproxy-auto", VolumeSource: {EmptyDir: &{}}},    {    Name: "mysql-users-secret-file",    VolumeSource: v1.VolumeSource{    ... // 3 identical fields    AWSElasticBlockStore: nil,    GitRepo: nil,    Secret: &v1.SecretVolumeSource{    SecretName: "internal-monitoring",    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &false,    },    NFS: nil,    ISCSI: nil,    ... // 22 identical fields    },    },    {    Name: "my-env-var-secrets",    VolumeSource: v1.VolumeSource{    ... // 3 identical fields    AWSElasticBlockStore: nil,    GitRepo: nil,    Secret: &v1.SecretVolumeSource{    SecretName: "my-env-var-secrets",    Items: nil, -  DefaultMode: &420, +  DefaultMode: nil,    Optional: &true,    },    NFS: nil,    ISCSI: nil,    ... // 22 identical fields    },    },    {Name: "bin", VolumeSource: {EmptyDir: &{}}},    },    InitContainers: []v1.Container{    {    ... // 16 identical fields    StartupProbe: nil,    Lifecycle: nil, -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePath: "", -  TerminationMessagePolicy: "File", +  TerminationMessagePolicy: "",    ImagePullPolicy: "Always",    SecurityContext: nil,    ... // 3 identical fields    },    {    ... // 16 identical fields    StartupProbe: nil,    Lifecycle: nil, -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePath: "", -  TerminationMessagePolicy: "File", +  TerminationMessagePolicy: "",    ImagePullPolicy: "Always",    SecurityContext: nil,    ... // 3 identical fields    },    },    Containers: []v1.Container{    {    ... 
// 3 identical fields    Args: nil,    WorkingDir: "",    Ports: []v1.ContainerPort{    {    Name: "",    HostPort: 0,    ContainerPort: 7777, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30100, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30101, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30102, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30103, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30104, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "",    HostPort: 0,    ContainerPort: 30105, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    },    EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}},    Env: []v1.EnvVar{    {    Name: "POD_NAME",    Value: "",    ValueFrom: &v1.EnvVarSource{    FieldRef: &v1.ObjectFieldSelector{ -  APIVersion: "v1", +  APIVersion: "",    FieldPath: "metadata.name",    },    ResourceFieldRef: nil,    ConfigMapKeyRef: nil,    ... // 2 identical fields    },    },    {    Name: "POD_NAMESPACE",    Value: "",    ValueFrom: &v1.EnvVarSource{    FieldRef: &v1.ObjectFieldSelector{ -  APIVersion: "v1", +  APIVersion: "",    FieldPath: "metadata.namespace",    },    ResourceFieldRef: nil,    ConfigMapKeyRef: nil,    ... // 2 identical fields    },    },    {Name: "PMM_AGENT_SERVER_ADDRESS", Value: "monitoring-service"},    {Name: "PMM_AGENT_SERVER_USERNAME", Value: "service_token"},    ... // 26 identical elements    },    Resources: {Limits: {s"cpu": {i: {...}, Format: "DecimalSI"}, s"memory": {i: {...}, Format: "DecimalSI"}}, Requests: {s"cpu": {i: {...}, s: "308m", Format: "DecimalSI"}, s"memory": {i: {...}, s: "508M", Format: "DecimalSI"}}},    ResizePolicy: nil,    ... // 2 identical fields    VolumeMounts: {{Name: "bin", MountPath: "/var/lib/mysql"}},    VolumeDevices: nil,    LivenessProbe: &v1.Probe{    ProbeHandler: v1.ProbeHandler{    Exec: nil,    HTTPGet: &v1.HTTPGetAction{    Path: "/local/Status",    Port: {IntVal: 7777},    Host: "", -  Scheme: "HTTP", +  Scheme: "",    HTTPHeaders: nil,    },    TCPSocket: nil,    GRPC: nil,    },    InitialDelaySeconds: 300,    TimeoutSeconds: 5,    ... // 4 identical fields    },    ReadinessProbe: &v1.Probe{    ProbeHandler: v1.ProbeHandler{    Exec: nil,    HTTPGet: &v1.HTTPGetAction{    Path: "/local/Status",    Port: {IntVal: 7777},    Host: "", -  Scheme: "HTTP", +  Scheme: "",    HTTPHeaders: nil,    },    TCPSocket: nil,    GRPC: nil,    },    InitialDelaySeconds: 15,    TimeoutSeconds: 15,    ... // 4 identical fields    },    StartupProbe: nil,    Lifecycle: &{PreStop: &{Exec: &{Command: {"bash", "-c", "pmm-admin unregister --force"}}}}, -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePath: "", -  TerminationMessagePolicy: "File", +  TerminationMessagePolicy: "",    ImagePullPolicy: "Always",    SecurityContext: nil,    ... // 3 identical fields    },    {    ... 
// 3 identical fields    Args: {"haproxy"},    WorkingDir: "",    Ports: []v1.ContainerPort{    {    Name: "mysql",    HostPort: 0,    ContainerPort: 3306, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "mysql-replicas",    HostPort: 0,    ContainerPort: 3307, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "proxy-protocol",    HostPort: 0,    ContainerPort: 3309, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "mysql-admin",    HostPort: 0,    ContainerPort: 33062, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "mysqlx",    HostPort: 0,    ContainerPort: 33060, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    {    Name: "stats",    HostPort: 0,    ContainerPort: 8404, -  Protocol: "TCP", +  Protocol: "",    HostIP: "",    },    },    EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}},    Env: {{Name: "PXC_SERVICE", Value: "monitoring-pxc"}, {Name: "LIVENESS_CHECK_TIMEOUT", Value: "5"}, {Name: "READINESS_CHECK_TIMEOUT", Value: "1"}},    ... // 8 identical fields    StartupProbe: nil,    Lifecycle: nil, -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePath: "", -  TerminationMessagePolicy: "File", +  TerminationMessagePolicy: "",    ImagePullPolicy: "Always",    SecurityContext: nil,    ... // 3 identical fields    },    {    ... // 16 identical fields    StartupProbe: nil,    Lifecycle: nil, -  TerminationMessagePath: "/dev/termination-log", +  TerminationMessagePath: "", -  TerminationMessagePolicy: "File", +  TerminationMessagePolicy: "",    ImagePullPolicy: "Always",    SecurityContext: nil,    ... // 3 identical fields    },    },    EphemeralContainers: nil, -  RestartPolicy: "Always", +  RestartPolicy: "",    TerminationGracePeriodSeconds: &30,    ActiveDeadlineSeconds: nil, -  DNSPolicy: "ClusterFirst", +  DNSPolicy: "",    NodeSelector: nil,    ServiceAccountName: "default", -  DeprecatedServiceAccount: "default", +  DeprecatedServiceAccount: "",    AutomountServiceAccountToken: nil,    NodeName: "",    ... // 2 identical fields    HostIPC: false,    ShareProcessNamespace: nil, -  SecurityContext: s"&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,AppArmorProfile:nil,SupplementalGroupsPolicy:nil,SELinux"..., +  SecurityContext: nil,    ImagePullSecrets: nil,    Hostname: "",    Subdomain: "",    Affinity: nil, -  SchedulerName: "default-scheduler", +  SchedulerName: "",    Tolerations: nil,    HostAliases: nil,    ... // 6 identical fields    PreemptionPolicy: nil,    Overhead: nil, -  TopologySpreadConstraints: nil, +  TopologySpreadConstraints: []v1.TopologySpreadConstraint{},    SetHostnameAsFQDN: nil,    OS: nil,    ... 
// 6 identical fields    },    },    VolumeClaimTemplates: nil,    ServiceName: "monitoring-haproxy", -  PodManagementPolicy: "OrderedReady", +  PodManagementPolicy: "",    UpdateStrategy: {Type: "RollingUpdate", RollingUpdate: &{Partition: &0}}, -  RevisionHistoryLimit: &10, +  RevisionHistoryLimit: nil,    MinReadySeconds: 0, -  PersistentVolumeClaimRetentionPolicy: s"&StatefulSetPersistentVolumeClaimRetentionPolicy{WhenDeleted:Retain,WhenScaled:Retain,}", +  PersistentVolumeClaimRetentionPolicy: nil,    Ordinals: nil,    },    Status: v1.StatefulSetStatus{ -  ObservedGeneration: 2, +  ObservedGeneration: 0, -  Replicas: 2, +  Replicas: 0, -  ReadyReplicas: 2, +  ReadyReplicas: 0, -  CurrentReplicas: 2, +  CurrentReplicas: 0, -  UpdatedReplicas: 2, +  UpdatedReplicas: 0, -  CurrentRevision: "monitoring-haproxy-8684668678", +  CurrentRevision: "", -  UpdateRevision: "monitoring-haproxy-8684668678", +  UpdateRevision: "", -  CollisionCount: &0, +  CollisionCount: nil,    Conditions: nil, -  AvailableReplicas: 2, +  AvailableReplicas: 0,    },   } [mysql] 2026/03/11 09:36:28 packets.go:58 unexpected EOF [mysql] 2026/03/11 09:36:43 packets.go:58 unexpected EOF 2026-03-11T09:36:44.726Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "16466328-d1cb-4c37-ad80-889ad2a0e02c", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-0: dial tcp: lookup monitoring-pxc-0.monitoring-pxc.monitoring-pmm3-14440 on 34.118.224.10:53: no such host"} 2026-03-11T09:37:09.349Z ERROR Reconciler error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "3d4ac267-1005-423b-bb37-be70ef85bbd4", "error": "wrong PXC options: validate cr: ProxySQL: volumeSpec should be specified", "errorVerbose": "ProxySQL: volumeSpec should be 
specified\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1.(*PerconaXtraDBCluster).Validate\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1/pxc_types.go:483\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1.(*PerconaXtraDBCluster).CheckNSetDefaults\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1/pxc_types.go:1049\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:267\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nvalidate cr\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1.(*PerconaXtraDBCluster).CheckNSetDefaults\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1/pxc_types.go:1051\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:267\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nwrong PXC 
options\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:269\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693"} sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:495 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313 2026-03-11T09:37:09.374Z ERROR Reconciler error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "0345549c-38ad-4e0d-a835-982b3158ca01", "error": "wrong PXC options: validate cr: ProxySQL: volumeSpec should be specified", "errorVerbose": "ProxySQL: volumeSpec should be specified\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1.(*PerconaXtraDBCluster).Validate\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1/pxc_types.go:483\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1.(*PerconaXtraDBCluster).CheckNSetDefaults\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1/pxc_types.go:1049\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:267\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nvalidate 
cr\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1.(*PerconaXtraDBCluster).CheckNSetDefaults\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1/pxc_types.go:1051\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:267\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nwrong PXC options\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:269\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693"} sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:495 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313
[ 13 further identical "Reconciler error" records elided: same error "wrong PXC options: validate cr: ProxySQL: volumeSpec should be specified" and stack trace, retried with exponential backoff at 09:37:09.411Z, 09:37:09.460Z, 09:37:09.523Z, 09:37:09.629Z, 09:37:09.817Z, 09:37:10.161Z, 09:37:10.827Z, 09:37:12.136Z, 09:37:14.721Z, 09:37:19.887Z, 09:37:30.172Z, 09:37:50.684Z, 09:38:31.671Z; only the reconcileID differs per attempt ]
2026-03-11T09:39:53.620Z ERROR Reconciler error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-14440"}, "namespace": "monitoring-pmm3-14440", "name": "monitoring", "reconcileID": "951e4b8e-44fd-4b5c-9fd0-fb1be599b299", "error": "wrong PXC options: validate cr: ProxySQL: volumeSpec should be specified", "errorVerbose": "ProxySQL: volumeSpec should be specified\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1.(*PerconaXtraDBCluster).Validate\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1/pxc_types.go:483\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1.(*PerconaXtraDBCluster).CheckNSetDefaults\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1/pxc_types.go:1049\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:267\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nvalidate 
max retry count 120 reached: something went wrong with the operator or the Kubernetes cluster
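The test therefore times out on a CR-validation failure rather than an infrastructure problem: the CR enables ProxySQL but carries no spec.proxysql.volumeSpec, so (*PerconaXtraDBCluster).Validate rejects it on every reconcile until the 120-retry budget is exhausted. As an illustrative sketch only, a merge patch in the same style the harness already uses for finalizers would satisfy the validator; the 2Gi storage request is an assumed placeholder, not a value taken from this test's CR:

    # hypothetical fix sketch: give spec.proxysql a volumeSpec so Validate stops
    # rejecting the CR ("ProxySQL: volumeSpec should be specified");
    # the 2Gi request is a placeholder size, not from the test fixture
    kubectl patch pxc -n monitoring-pmm3-14440 monitoring --type=merge \
      -p '{"spec":{"proxysql":{"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"2Gi"}}}}}}}'

The CR schema also accepts emptyDir or hostPath as the volume source under volumeSpec; any one of the three should pass this particular check. In this run the likelier root cause is that the test's cr.yaml template simply omits the stanza, so the fix belongs in the fixture rather than in a live patch.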