Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/logs/monitoring-pmm3-8-0.log Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 + cluster=monitoring + create_infra monitoring-pmm3-7039 + local ns=monitoring-pmm3-7039 + '[' -n pxc-operator ']' + kubectl get pxc --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n monitoring-pmm3-32292 monitoring --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/monitoring patched (no change) + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.zbwIdSivS6 ++ mktemp + local LAST_ERR=/tmp/tmp.0wOgAwCEs5 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.zbwIdSivS6 perconaxtradbcluster.pxc.percona.com "monitoring" deleted from monitoring-pmm3-32292 namespace + cat /tmp/tmp.0wOgAwCEs5 + rm /tmp/tmp.zbwIdSivS6 /tmp/tmp.0wOgAwCEs5 + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.fpg1WWKt6d ++ mktemp + local LAST_ERR=/tmp/tmp.TbyhHaxcmX + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.fpg1WWKt6d No resources found + cat /tmp/tmp.TbyhHaxcmX + rm /tmp/tmp.fpg1WWKt6d /tmp/tmp.TbyhHaxcmX + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.KV4uO5EmPN ++ mktemp + local LAST_ERR=/tmp/tmp.v7YsSQ2hHU + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.KV4uO5EmPN No resources found + cat /tmp/tmp.v7YsSQ2hHU + rm /tmp/tmp.KV4uO5EmPN /tmp/tmp.v7YsSQ2hHU + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get MutatingWebhookConfiguration + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ grep chaos-mesh.org ++ kubectl get crd + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk 
'{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + set +o xtrace ++ mktemp ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator + awk '{print$1}' + local LAST_OUT=/tmp/tmp.Sp0KbMqA99 + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.YJJsujuaSm + local exit_status=0 + local LAST_OUT=/tmp/tmp.hlNGcdqGXh ++ seq 0 2 ++ mktemp + for i in '$(seq 0 2)' + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.zNUY8U1q4H + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + xargs kubectl delete ns + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Sp0KbMqA99 + cat /tmp/tmp.YJJsujuaSm + rm /tmp/tmp.Sp0KbMqA99 /tmp/tmp.YJJsujuaSm + return 0 namespace "monitoring-pmm3-32292" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.hlNGcdqGXh namespace "pxc-operator" deleted + cat /tmp/tmp.zNUY8U1q4H + rm /tmp/tmp.hlNGcdqGXh /tmp/tmp.zNUY8U1q4H + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.L8jgCFaUXW ++ mktemp + local LAST_ERR=/tmp/tmp.eEUQxEjQP9 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.L8jgCFaUXW namespace/pxc-operator created + cat /tmp/tmp.eEUQxEjQP9 + rm /tmp/tmp.L8jgCFaUXW /tmp/tmp.eEUQxEjQP9 + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.uDrReRQQZ6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.AkioLQxUYk ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.uDrReRQQZ6 ++ cat /tmp/tmp.AkioLQxUYk ++ rm /tmp/tmp.uDrReRQQZ6 /tmp/tmp.AkioLQxUYk ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster7 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.MUDg7JU0X6 ++ mktemp + local LAST_ERR=/tmp/tmp.Hc2z8zgnFj + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster7 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.MUDg7JU0X6 Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster7" 
modified. + cat /tmp/tmp.Hc2z8zgnFj + rm /tmp/tmp.MUDg7JU0X6 /tmp/tmp.Hc2z8zgnFj + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.yi0hzoUmK2 ++ mktemp + local LAST_ERR=/tmp/tmp.z2Fg1xo9Gb + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.yi0hzoUmK2 customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.z2Fg1xo9Gb + rm /tmp/tmp.yi0hzoUmK2 /tmp/tmp.z2Fg1xo9Gb + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + sed -e 's^namespace: .*^namespace: pxc-operator^' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/deploy/cw-rbac.yaml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.JpqEorr6MJ ++ mktemp + local LAST_ERR=/tmp/tmp.Jak7VzD3nI + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.JpqEorr6MJ clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.Jak7VzD3nI + rm /tmp/tmp.JpqEorr6MJ /tmp/tmp.Jak7VzD3nI + return 0 + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/deploy/cw-operator.yaml + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2384-7f4bfbf4^' + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "PXCO_FEATURE_GATES").value) = ""' - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - + kubectl_bin apply -f - + sed -e 's^failureThreshold: .*^failureThreshold: 10^' ++ mktemp + local LAST_OUT=/tmp/tmp.FuzSadRJOF ++ mktemp + local LAST_ERR=/tmp/tmp.7iZ6eme1vn + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.FuzSadRJOF deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.7iZ6eme1vn + rm /tmp/tmp.FuzSadRJOF /tmp/tmp.7iZ6eme1vn + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ 
mktemp + local LAST_OUT=/tmp/tmp.2N2oJjVg3T ++ mktemp + local LAST_ERR=/tmp/tmp.lQjsAuH8iU + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.2N2oJjVg3T pod/percona-xtradb-cluster-operator-944bd69c8-9g8wj condition met + cat /tmp/tmp.lQjsAuH8iU + rm /tmp/tmp.2N2oJjVg3T /tmp/tmp.lQjsAuH8iU + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ head -1 ++ jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sv62MkrPzp +++ mktemp ++ local LAST_ERR=/tmp/tmp.eNdoxrt7kX ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.sv62MkrPzp ++ cat /tmp/tmp.eNdoxrt7kX ++ rm /tmp/tmp.sv62MkrPzp /tmp/tmp.eNdoxrt7kX ++ return 0 + wait_pod percona-xtradb-cluster-operator-944bd69c8-9g8wj 480 pxc-operator + local pod=percona-xtradb-cluster-operator-944bd69c8-9g8wj + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-944bd69c8-9g8wj ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-944bd69c8-9g8wj condition met waiting for pod/percona-xtradb-cluster-operator-944bd69c8-9g8wj to become Ready.Ok + sleep 3 + create_namespace monitoring-pmm3-7039 + local namespace=monitoring-pmm3-7039 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were 
provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces monitoring-pmm3-7039' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces monitoring-pmm3-7039 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace monitoring-pmm3-7039 + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.fLLNwgf3hs + local LAST_OUT=/tmp/tmp.eoduVxrsIy ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.K1yU34zuDQ + local exit_status=0 ++ seq 0 2 + xargs kubectl delete ns + local LAST_ERR=/tmp/tmp.dot7lPqWO2 + local exit_status=0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-pmm3-7039 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.eoduVxrsIy + cat /tmp/tmp.dot7lPqWO2 + rm /tmp/tmp.eoduVxrsIy /tmp/tmp.dot7lPqWO2 + return 0 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-pmm3-7039 error: resource(s) were provided, but no name was specified + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-pmm3-7039 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.fLLNwgf3hs + cat /tmp/tmp.K1yU34zuDQ Error from server (NotFound): namespaces "monitoring-pmm3-7039" not found + rm /tmp/tmp.fLLNwgf3hs /tmp/tmp.K1yU34zuDQ + return 1 + : + wait_for_delete namespace/monitoring-pmm3-7039 + local res=namespace/monitoring-pmm3-7039 + echo -n 'waiting for namespace/monitoring-pmm3-7039 to be deleted' waiting for namespace/monitoring-pmm3-7039 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "monitoring-pmm3-7039" not found + desc 'create namespace monitoring-pmm3-7039' + set +o xtrace ----------------------------------------------------------------------------------- create namespace monitoring-pmm3-7039 ----------------------------------------------------------------------------------- + kubectl_bin create namespace monitoring-pmm3-7039 ++ mktemp + local LAST_OUT=/tmp/tmp.TxHeDUVmM8 ++ mktemp + local LAST_ERR=/tmp/tmp.OshkfhdGCS + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace monitoring-pmm3-7039 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.TxHeDUVmM8 namespace/monitoring-pmm3-7039 created + cat /tmp/tmp.OshkfhdGCS + rm /tmp/tmp.TxHeDUVmM8 /tmp/tmp.OshkfhdGCS + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.BHCWIcz11o +++ mktemp ++ local LAST_ERR=/tmp/tmp.Y2Q0o35F9j ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.BHCWIcz11o ++ cat /tmp/tmp.Y2Q0o35F9j ++ rm /tmp/tmp.BHCWIcz11o /tmp/tmp.Y2Q0o35F9j ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster7 --namespace=monitoring-pmm3-7039 ++ mktemp + 
local LAST_OUT=/tmp/tmp.Qag7IeIqEz ++ mktemp + local LAST_ERR=/tmp/tmp.yeKub7LMJJ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster7 --namespace=monitoring-pmm3-7039 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Qag7IeIqEz Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2384-7f4bfbf4-1-cluster7" modified. + cat /tmp/tmp.yeKub7LMJJ + rm /tmp/tmp.Qag7IeIqEz /tmp/tmp.yeKub7LMJJ + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.sTEOifhRQg ++ mktemp + local LAST_ERR=/tmp/tmp.v7hLSZUzaF + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.sTEOifhRQg secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.v7hLSZUzaF + rm /tmp/tmp.sTEOifhRQg /tmp/tmp.v7hLSZUzaF + return 0 + deploy_helm monitoring-pmm3-7039 + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" already exists with the same configuration, skipping + helm repo add minio https://charts.min.io/ "minio" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "chaos-mesh" chart repository ...Successfully got an update from the "percona" chart repository ...Successfully got an update from the "hashicorp" chart repository Update Complete. ⎈Happy Helming!⎈ + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + deploy_pmm3_server + helm uninstall -n '' monitoring Error: uninstall: Release not loaded: monitoring: release: not found + : + helm repo remove percona "percona" has been removed from your repositories + kubectl delete clusterrole monitoring --ignore-not-found + kubectl delete clusterrolebinding monitoring --ignore-not-found + helm repo add percona https://percona.github.io/percona-helm-charts/ "percona" has been added to your repositories + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "chaos-mesh" chart repository ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "percona" chart repository ...Successfully got an update from the "hashicorp" chart repository Update Complete. ⎈Happy Helming!⎈ + '[' '!' 
-z '' ']' + retry 10 60 helm install monitoring percona/pmm -n '' --set fullnameOverride=monitoring --set image.tag=3-dev-latest --set image.repository=perconalab/pmm-server --set service.type=LoadBalancer --force + local max=10 + local delay=60 + shift 2 + local n=1 + helm install monitoring percona/pmm -n '' --set fullnameOverride=monitoring --set image.tag=3-dev-latest --set image.repository=perconalab/pmm-server --set service.type=LoadBalancer --force NAME: monitoring LAST DEPLOYED: Wed Mar 11 09:20:13 2026 NAMESPACE: monitoring-pmm3-7039 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: Percona Monitoring and Management (PMM) An open source database monitoring, observability and management tool Check more info here: https://docs.percona.com/percona-monitoring-and-management/index.html Get the application URL: NOTE: It may take a few minutes for the LoadBalancer IP to be available. You can watch the status of by running 'kubectl get --namespace monitoring-pmm3-7039 svc -w monitoring-service' export SERVICE_IP=$(kubectl get svc --namespace monitoring-pmm3-7039 monitoring-service -o jsonpath="{.status.loadBalancer.ingress[0].ip}") echo https://$SERVICE_IP: Get password for the "admin" user: export ADMIN_PASS=$(kubectl get secret pmm-secret --namespace monitoring-pmm3-7039 -o jsonpath='{.data.PMM_ADMIN_PASSWORD}' | base64 --decode) echo $ADMIN_PASS + wait_for_pmm_service + timeout=420 ++ date +%s + start=1773220816 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1773220817 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1773220820 - start >= timeout )) + sleep 2 + grep -q . + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' ++ date +%s + (( 1773220823 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1773220826 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1773220830 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1773220833 - start >= timeout )) + sleep 2 + grep -q . + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' ++ date +%s + (( 1773220837 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1773220840 - start >= timeout )) + sleep 2 + grep -q . + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' ++ date +%s + (( 1773220844 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1773220847 - start >= timeout )) + sleep 2 + grep -q . + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' ++ date +%s + (( 1773220850 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1773220853 - start >= timeout )) + sleep 2 + grep -q . + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' ++ date +%s + (( 1773220856 - start >= timeout )) + sleep 2 + grep -q . 
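# The iterations above and below come from wait_for_pmm_service, which polls
# the monitoring-service LoadBalancer until an ingress IP or hostname shows up.
# A minimal sketch reconstructed from the traced commands; the expiry branch is
# an assumption, since this run gets an address before the 420s budget runs out.
wait_for_pmm_service() {
    local timeout=420
    local start=$(date +%s)
    until kubectl get svc monitoring-service \
            -o 'jsonpath={.status.loadBalancer.ingress[0]}' | grep -q .; do
        if (( $(date +%s) - start >= timeout )); then
            echo "timeout waiting for monitoring-service ingress" >&2
            return 1
        fi
        sleep 2
    done
}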
+ kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' ++ date +%s + (( 1773220859 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . + kubectl_bin wait sts/monitoring '--for=jsonpath={.status.readyReplicas}=1' --timeout=420s ++ mktemp + local LAST_OUT=/tmp/tmp.stv5UfCAHK ++ mktemp + local LAST_ERR=/tmp/tmp.MryMLP0LZu + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait sts/monitoring '--for=jsonpath={.status.readyReplicas}=1' --timeout=420s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.stv5UfCAHK statefulset.apps/monitoring condition met + cat /tmp/tmp.MryMLP0LZu + rm /tmp/tmp.stv5UfCAHK /tmp/tmp.MryMLP0LZu + return 0 + desc 'create secret' + set +o xtrace ----------------------------------------------------------------------------------- create secret ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/secrets.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.S85uJhK7oO ++ mktemp + local LAST_ERR=/tmp/tmp.aZ2Y5ZVT5w + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/secrets.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.S85uJhK7oO secret/my-cluster-secrets created + cat /tmp/tmp.aZ2Y5ZVT5w + rm /tmp/tmp.S85uJhK7oO /tmp/tmp.aZ2Y5ZVT5w + return 0 + desc 'add PMM3 token to secret' + set +o xtrace ----------------------------------------------------------------------------------- add PMM3 token to secret ----------------------------------------------------------------------------------- ++ get_pmm_server_token operator ++ local key_name=operator ++ [[ -z operator ]] ++ local ADMIN_PASSWORD +++ kubectl get secret pmm-secret -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' +++ base64 --decode ++ ADMIN_PASSWORD='iKK^Hf^sD^bE~5P8' ++ [[ -z iKK^Hf^sD^bE~5P8 ]] ++ local create_response create_status_code create_json_response ++ local retry=0 ++ [[ '' == 201 ]] ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.4ReliUiVV4 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.AbiYmmdRqN +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.4ReliUiVV4 +++++ cat /tmp/tmp.AbiYmmdRqN +++++ rm /tmp/tmp.4ReliUiVV4 /tmp/tmp.AbiYmmdRqN +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.T2pe3wAHBd +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.gr67XjtHQj ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.T2pe3wAHBd ++++ cat 
/tmp/tmp.gr67XjtHQj ++++ rm /tmp/tmp.T2pe3wAHBd /tmp/tmp.gr67XjtHQj ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.7DjLRsfrji +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.xT4hoaDiVx ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.7DjLRsfrji ++++ cat /tmp/tmp.xT4hoaDiVx ++++ rm /tmp/tmp.7DjLRsfrji /tmp/tmp.xT4hoaDiVx ++++ return 0 +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -d '{"name":"operator", "role":"Admin", "isDisabled":false}' --user 'admin:iKK^Hf^sD^bE~5P8' https://35.184.158.12/graph/api/serviceaccounts -w '\n%{http_code}' ++ create_response='{"id":2,"uid":"fffok09imkagwa","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ echo '{"id":2,"uid":"fffok09imkagwa","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ tail -n1 ++ create_status_code=201 +++ echo '{"id":2,"uid":"fffok09imkagwa","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ sed '$ d' ++ create_json_response='{"id":2,"uid":"fffok09imkagwa","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' ++ sleep 5 ++ let retry+=1 ++ '[' 1 -ge 24 ']' ++ [[ 201 == 201 ]] ++ local service_account_id +++ echo '{"id":2,"uid":"fffok09imkagwa","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' +++ jq -r .id ++ service_account_id=2 ++ [[ -z 2 ]] ++ [[ 2 == \n\u\l\l ]] ++ local token_response token_status_code token_json_response ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.mIwCwN8WkU ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.q3JdOtjgVs +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.mIwCwN8WkU +++++ cat /tmp/tmp.q3JdOtjgVs +++++ rm /tmp/tmp.mIwCwN8WkU /tmp/tmp.q3JdOtjgVs +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.wN2V5jBMyl +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.2ul1AMW1xk ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.wN2V5jBMyl ++++ cat /tmp/tmp.2ul1AMW1xk ++++ rm /tmp/tmp.wN2V5jBMyl /tmp/tmp.2ul1AMW1xk ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp 
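# get_pmm_server_token, traced around this point, provisions an API token via
# PMM's Grafana endpoints: create an Admin service account, then mint a token
# for it. A condensed sketch of the two curl calls from this trace; the retry
# loop and HTTP status-code checks the harness performs are omitted here.
PMM_IP=$(kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}')
ADMIN_PASSWORD=$(kubectl get secret pmm-secret -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' | base64 --decode)
sa_id=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
    -d '{"name":"operator", "role":"Admin", "isDisabled":false}' \
    --user "admin:${ADMIN_PASSWORD}" \
    "https://${PMM_IP}/graph/api/serviceaccounts" | jq -r .id)
TOKEN=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
    -d '{"name":"operator"}' \
    --user "admin:${ADMIN_PASSWORD}" \
    "https://${PMM_IP}/graph/api/serviceaccounts/${sa_id}/tokens" | jq -r .key)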
++++ local LAST_OUT=/tmp/tmp.sWL03IYQhv +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.bCzRgT2ARO ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.sWL03IYQhv ++++ cat /tmp/tmp.bCzRgT2ARO ++++ rm /tmp/tmp.sWL03IYQhv /tmp/tmp.bCzRgT2ARO ++++ return 0 +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -d '{"name":"operator"}' --user 'admin:iKK^Hf^sD^bE~5P8' https://35.184.158.12/graph/api/serviceaccounts/2/tokens -w '\n%{http_code}' ++ token_response='{"id":1,"name":"operator","key":"glsa_F6pV1sYTut4Chhch0p9Reu5pBK4dFsNZ_cfe5a1bd"} 200' +++ echo '{"id":1,"name":"operator","key":"glsa_F6pV1sYTut4Chhch0p9Reu5pBK4dFsNZ_cfe5a1bd"} 200' +++ tail -n1 ++ token_status_code=200 +++ echo '{"id":1,"name":"operator","key":"glsa_F6pV1sYTut4Chhch0p9Reu5pBK4dFsNZ_cfe5a1bd"} 200' +++ sed '$ d' ++ token_json_response='{"id":1,"name":"operator","key":"glsa_F6pV1sYTut4Chhch0p9Reu5pBK4dFsNZ_cfe5a1bd"}' ++ [[ 200 -ne 200 ]] ++ echo '{"id":1,"name":"operator","key":"glsa_F6pV1sYTut4Chhch0p9Reu5pBK4dFsNZ_cfe5a1bd"}' ++ jq -r .key + TOKEN=glsa_F6pV1sYTut4Chhch0p9Reu5pBK4dFsNZ_cfe5a1bd + kubectl_bin patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmservertoken": "glsa_F6pV1sYTut4Chhch0p9Reu5pBK4dFsNZ_cfe5a1bd"}}' ++ mktemp + local LAST_OUT=/tmp/tmp.jT01W32K2h ++ mktemp + local LAST_ERR=/tmp/tmp.90CYulte7d + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmservertoken": "glsa_F6pV1sYTut4Chhch0p9Reu5pBK4dFsNZ_cfe5a1bd"}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.jT01W32K2h secret/my-cluster-secrets patched + cat /tmp/tmp.90CYulte7d + rm /tmp/tmp.jT01W32K2h /tmp/tmp.90CYulte7d + return 0 + desc 'create PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PXC cluster ----------------------------------------------------------------------------------- + spinup_pxc monitoring /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/monitoring.yml 3 120 + local cluster=monitoring + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/monitoring.yml + local size=3 + local sleep=120 + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/client.yml + local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/client.yml + local pvc_name= + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/client.yml '' + local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/client.yml + local pvc_name= + kubectl_bin apply -f - + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' ++ mktemp + /usr/bin/sed -e 
's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + local LAST_OUT=/tmp/tmp.spw7bSFYji + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2384-7f4bfbf4#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' ++ mktemp + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.monitoring-pmm3-7039~ + local LAST_ERR=/tmp/tmp.uEWgSHvoh4 + local exit_status=0 ++ seq 0 2 + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/conf/client.yml + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.spw7bSFYji deployment.apps/pxc-client created + cat /tmp/tmp.uEWgSHvoh4 + rm /tmp/tmp.spw7bSFYji /tmp/tmp.uEWgSHvoh4 + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/monitoring.yml + local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/monitoring.yml + local pvc_name= + '[' -z '' ']' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.N8ByEldSz1 ++ mktemp + local LAST_ERR=/tmp/tmp.EulNJBC3Gw + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/monitoring.yml '' + local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/monitoring.yml + local pvc_name= + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2384-7f4bfbf4#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.monitoring-pmm3-7039~ + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/monitoring.yml + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.N8ByEldSz1 perconaxtradbcluster.pxc.percona.com/monitoring created + cat /tmp/tmp.EulNJBC3Gw + rm /tmp/tmp.N8ByEldSz1 /tmp/tmp.EulNJBC3Gw + return 0 + desc 
'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy monitoring ++ local target_cluster=monitoring +++ kubectl_bin get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TLWM5PqM9v ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xBfb9qaw63 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.TLWM5PqM9v +++ cat /tmp/tmp.xBfb9qaw63 +++ rm /tmp/tmp.TLWM5PqM9v /tmp/tmp.xBfb9qaw63 +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo monitoring-haproxy ++ return + local proxy=monitoring-haproxy + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-pmm3-7039 ++ mktemp + local LAST_OUT=/tmp/tmp.weGG6OuffF ++ mktemp + local LAST_ERR=/tmp/tmp.dVi04hEqzp + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-pmm3-7039 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.weGG6OuffF pod/monitoring-haproxy-0 condition met pod/monitoring-pxc-0 condition met + cat /tmp/tmp.dVi04hEqzp + rm /tmp/tmp.weGG6OuffF /tmp/tmp.dVi04hEqzp + return 0 + wait_for_running monitoring-haproxy 1 + local name=monitoring-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-haproxy-0 480 + local pod=monitoring-haproxy-0 + local max_retry=480 + local ns= ++ echo monitoring-haproxy-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/monitoring-haproxy-0 condition met waiting for pod/monitoring-haproxy-0 to become Ready.Ok + wait_for_running monitoring-pxc 3 + local name=monitoring-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-0 480 + local pod=monitoring-pxc-0 + local max_retry=480 + local ns= ++ echo monitoring-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/monitoring-pxc-0 condition met waiting for pod/monitoring-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-1 480 + local pod=monitoring-pxc-1 + local max_retry=480 + local ns= ++ echo monitoring-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/monitoring-pxc-1 condition met waiting for pod/monitoring-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-2 480 + local 
pod=monitoring-pxc-2 + local max_retry=480 + local ns= ++ echo monitoring-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/monitoring-pxc-2 condition met waiting for pod/monitoring-pxc-2 to become Ready.Ok + sleep 120 ++ kubectl get pxc monitoring -o 'jsonpath={.spec.secretsName}' + local secret_name=my-cluster-secrets ++ getSecretData my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.XVdFMqaqAL +++ mktemp ++ local LAST_ERR=/tmp/tmp.b81n1rP3M9 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.XVdFMqaqAL ++ cat /tmp/tmp.b81n1rP3M9 ++ rm /tmp/tmp.XVdFMqaqAL /tmp/tmp.b81n1rP3M9 ++ return 0 + local root_pass=root_password + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.SYilCWKHOm +++ mktemp ++ local LAST_ERR=/tmp/tmp.2WEn5uhspS ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.SYilCWKHOm ++ cat /tmp/tmp.2WEn5uhspS ++ rm /tmp/tmp.SYilCWKHOm /tmp/tmp.2WEn5uhspS ++ return 0 + client_pod=pxc-client-67fc4995bb-7k5jz + wait_pod pxc-client-67fc4995bb-7k5jz + local pod=pxc-client-67fc4995bb-7k5jz + local max_retry=480 + local ns= ++ echo pxc-client-67fc4995bb-7k5jz ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-67fc4995bb-7k5jz condition met waiting for pod/pxc-client-67fc4995bb-7k5jz to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.I9gxAuba5N +++ mktemp ++ local LAST_ERR=/tmp/tmp.9E0MZdVSz2 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.I9gxAuba5N ++ cat /tmp/tmp.9E0MZdVSz2 ++ rm /tmp/tmp.I9gxAuba5N /tmp/tmp.9E0MZdVSz2 ++ return 0 + client_pod=pxc-client-67fc4995bb-7k5jz + wait_pod 
pxc-client-67fc4995bb-7k5jz + local pod=pxc-client-67fc4995bb-7k5jz + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo pxc-client-67fc4995bb-7k5jz ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-67fc4995bb-7k5jz condition met waiting for pod/pxc-client-67fc4995bb-7k5jz to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.WY7iryjwAA +++ mktemp ++ local LAST_ERR=/tmp/tmp.QgVHpBAbsd ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.WY7iryjwAA ++ cat /tmp/tmp.QgVHpBAbsd ++ rm /tmp/tmp.WY7iryjwAA /tmp/tmp.QgVHpBAbsd ++ return 0 + client_pod=pxc-client-67fc4995bb-7k5jz + wait_pod pxc-client-67fc4995bb-7k5jz + local pod=pxc-client-67fc4995bb-7k5jz + local max_retry=480 + local ns= ++ echo pxc-client-67fc4995bb-7k5jz ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-67fc4995bb-7k5jz condition met waiting for pod/pxc-client-67fc4995bb-7k5jz to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.TyCF3e9CRW/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.TyCF3e9CRW/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/select-1.sql /tmp/tmp.TyCF3e9CRW/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Xp93LYqHyt +++ mktemp ++ local LAST_ERR=/tmp/tmp.NbEHXz1Wag ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Xp93LYqHyt ++ cat /tmp/tmp.NbEHXz1Wag ++ rm /tmp/tmp.Xp93LYqHyt /tmp/tmp.NbEHXz1Wag ++ return 0 + client_pod=pxc-client-67fc4995bb-7k5jz + wait_pod pxc-client-67fc4995bb-7k5jz + local pod=pxc-client-67fc4995bb-7k5jz + local max_retry=480 + local ns= ++ grep -E '^(pxc|proxysql)$' ++ echo pxc-client-67fc4995bb-7k5jz ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-67fc4995bb-7k5jz condition met waiting for pod/pxc-client-67fc4995bb-7k5jz to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.TyCF3e9CRW/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.TyCF3e9CRW/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/select-1.sql /tmp/tmp.TyCF3e9CRW/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YbKthubzId +++ mktemp ++ local LAST_ERR=/tmp/tmp.SMOLZiW1TE ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.YbKthubzId ++ cat /tmp/tmp.SMOLZiW1TE ++ rm /tmp/tmp.YbKthubzId /tmp/tmp.SMOLZiW1TE ++ return 0 + client_pod=pxc-client-67fc4995bb-7k5jz + wait_pod pxc-client-67fc4995bb-7k5jz + local pod=pxc-client-67fc4995bb-7k5jz + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' ++ echo pxc-client-67fc4995bb-7k5jz + local container= + set +o xtrace pod/pxc-client-67fc4995bb-7k5jz condition met waiting for pod/pxc-client-67fc4995bb-7k5jz to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! -s /tmp/tmp.TyCF3e9CRW/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.TyCF3e9CRW/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/select-1.sql /tmp/tmp.TyCF3e9CRW/select-1.sql ++ is_keyring_plugin_in_use monitoring ++ local cluster=monitoring ++ [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] ++ kubectl exec -it monitoring-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ grep -E -o 'early-plugin-load=keyring_\w+.so' Unable to use a TTY - input is not a terminal or the right kind of file ++ return 1 + '[' '' ']' + wait_for_generation sts/monitoring-pxc 1 + local resource=sts/monitoring-pxc + local target_generation=1 + echo 'Waiting for sts/monitoring-pxc to reach generation 1...' Waiting for sts/monitoring-pxc to reach generation 1... + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=1 + '[' 1 -eq 1 ']' + echo 'Resource sts/monitoring-pxc has reached generation 1.' Resource sts/monitoring-pxc has reached generation 1. 
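# wait_for_generation, traced here for monitoring-pxc and just below for
# monitoring-haproxy, polls .metadata.generation until the StatefulSet spec
# reaches the expected revision. A minimal sketch matching the traced commands;
# the retry sleep is an assumption, since both calls in this run succeed on
# the first check.
wait_for_generation() {
    local resource=$1
    local target_generation=$2
    echo "Waiting for ${resource} to reach generation ${target_generation}..."
    while true; do
        current_generation=$(kubectl get "${resource}" -o 'jsonpath={.metadata.generation}')
        if [ "${current_generation}" -eq "${target_generation}" ]; then
            echo "Resource ${resource} has reached generation ${target_generation}."
            break
        fi
        sleep 5   # assumed polling interval; never reached in this run
    done
}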
+ break + wait_for_generation sts/monitoring-haproxy 1 + local resource=sts/monitoring-haproxy + local target_generation=1 + echo 'Waiting for sts/monitoring-haproxy to reach generation 1...' Waiting for sts/monitoring-haproxy to reach generation 1... + true ++ kubectl get sts/monitoring-haproxy -o 'jsonpath={.metadata.generation}' + current_generation=1 + '[' 1 -eq 1 ']' + echo 'Resource sts/monitoring-haproxy has reached generation 1.' Resource sts/monitoring-haproxy has reached generation 1. + break + sleep 10 + kubectl wait pod -l app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --for=condition=ready --timeout=600s pod/monitoring-haproxy-0 condition met pod/monitoring-haproxy-1 condition met pod/monitoring-pxc-0 condition met pod/monitoring-pxc-1 condition met pod/monitoring-pxc-2 condition met + wait_cluster_consistency monitoring 3 2 + local cluster_name=monitoring + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/monitoring to be ready' waiting for pxc/monitoring to be ready++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.TqamBPz5mW +++ mktemp ++ local LAST_ERR=/tmp/tmp.fDFwupqonO ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.TqamBPz5mW ++ cat /tmp/tmp.fDFwupqonO ++ rm /tmp/tmp.TqamBPz5mW /tmp/tmp.fDFwupqonO ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ErAFDSuH82 +++ mktemp ++ local LAST_ERR=/tmp/tmp.HeNckfaGW7 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ErAFDSuH82 ++ cat /tmp/tmp.HeNckfaGW7 ++ rm /tmp/tmp.ErAFDSuH82 /tmp/tmp.HeNckfaGW7 ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine monitoring +++ local cluster_name=monitoring ++++ get_proxy monitoring ++++ local target_cluster=monitoring +++++ kubectl_bin get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.UhTLGoQfO8 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.DvluLCQlTP +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.UhTLGoQfO8 +++++ cat /tmp/tmp.DvluLCQlTP +++++ rm /tmp/tmp.UhTLGoQfO8 /tmp/tmp.DvluLCQlTP +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo monitoring-haproxy ++++ return +++ local cluster_proxy=monitoring-haproxy +++ echo haproxy ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mjynB9XGtC +++ mktemp ++ local LAST_ERR=/tmp/tmp.VgCRkOYSAe ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.mjynB9XGtC ++ cat /tmp/tmp.VgCRkOYSAe ++ rm /tmp/tmp.mjynB9XGtC /tmp/tmp.VgCRkOYSAe ++ 
return 0 + [[ 2 == \2 ]] + echo + compare_kubectl statefulset/monitoring-pxc -no-prefix + local resource=statefulset/monitoring-pxc + local postfix=-no-prefix + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix.yml + local new_result=/tmp/tmp.TyCF3e9CRW/statefulset_monitoring-pxc.yml + desc 'compare statefulset/monitoring-pxc--no-prefix' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-pxc--no-prefix ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 ++ echo '1.32 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 ++ echo '1.32 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix-k129.yml ']' + version_gt 1.27 ++ echo '1.32 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix-k127-oc.yml ']' + version_gt 1.29 ++ echo '1.32 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix-k127-aks.yml ']' + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") | del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. 
== "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-pmm3-7039", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + kubectl_bin get -o yaml statefulset/monitoring-pxc ++ mktemp + local LAST_OUT=/tmp/tmp.H8YQef38Rc ++ mktemp + local LAST_ERR=/tmp/tmp.LyoqZtB6aj + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.H8YQef38Rc + cat /tmp/tmp.LyoqZtB6aj + rm /tmp/tmp.H8YQef38Rc /tmp/tmp.LyoqZtB6aj + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-no-prefix-k127.yml /tmp/tmp.TyCF3e9CRW/statefulset_monitoring-pxc.yml + log 'compare_kubectl: statefulset/monitoring-pxc OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2026-03-11T09:28:48+0000]' compare_kubectl: statefulset/monitoring-pxc OK [2026-03-11T09:28:48+0000] compare_kubectl: statefulset/monitoring-pxc OK + compare_kubectl statefulset/monitoring-haproxy -no-prefix + local resource=statefulset/monitoring-haproxy + local postfix=-no-prefix + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix.yml + local new_result=/tmp/tmp.TyCF3e9CRW/statefulset_monitoring-haproxy.yml + desc 'compare statefulset/monitoring-haproxy--no-prefix' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-haproxy--no-prefix ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 ++ bc -l ++ echo '1.32 >= 1.33' + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 ++ echo '1.32 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix-k129.yml ']' + version_gt 1.27 ++ echo '1.32 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix-k127-oc.yml ']' + version_gt 1.29 ++ echo '1.32 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-haproxy + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") | del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. 
| select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-pmm3-7039", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.oPaKSrby0f ++ mktemp + local LAST_ERR=/tmp/tmp.kg4qshuLH5 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.oPaKSrby0f + cat /tmp/tmp.kg4qshuLH5 + rm /tmp/tmp.oPaKSrby0f /tmp/tmp.kg4qshuLH5 + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-no-prefix-k127.yml /tmp/tmp.TyCF3e9CRW/statefulset_monitoring-haproxy.yml + log 'compare_kubectl: statefulset/monitoring-haproxy OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2026-03-11T09:28:50+0000]' compare_kubectl: statefulset/monitoring-haproxy OK [2026-03-11T09:28:50+0000] compare_kubectl: statefulset/monitoring-haproxy OK + desc 'apply my-env-var-secrets to add PMM_PREFIX' + set +o xtrace ----------------------------------------------------------------------------------- apply my-env-var-secrets to add PMM_PREFIX ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/envsecrets.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.1Fol1203Am ++ mktemp + local LAST_ERR=/tmp/tmp.PEiDCOhMgk + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/conf/envsecrets.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.1Fol1203Am secret/my-env-var-secrets created + cat /tmp/tmp.PEiDCOhMgk + rm /tmp/tmp.1Fol1203Am /tmp/tmp.PEiDCOhMgk + return 0 + wait_for_generation sts/monitoring-pxc 2 + local resource=sts/monitoring-pxc + local target_generation=2 + echo 'Waiting for sts/monitoring-pxc to reach generation 2...' Waiting for sts/monitoring-pxc to reach generation 2... + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=1 + '[' 1 -eq 2 ']' + echo 'Resource sts/monitoring-pxc is at generation 1. Waiting...' Resource sts/monitoring-pxc is at generation 1. Waiting... 
+ sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 2 ']' + echo 'Resource sts/monitoring-pxc has reached generation 2.' Resource sts/monitoring-pxc has reached generation 2. + break + wait_for_generation sts/monitoring-haproxy 2 + local resource=sts/monitoring-haproxy + local target_generation=2 + echo 'Waiting for sts/monitoring-haproxy to reach generation 2...' Waiting for sts/monitoring-haproxy to reach generation 2... + true ++ kubectl get sts/monitoring-haproxy -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 2 ']' + echo 'Resource sts/monitoring-haproxy has reached generation 2.' Resource sts/monitoring-haproxy has reached generation 2. + break + desc 'create new PMM token and add it to the secret' + set +o xtrace ----------------------------------------------------------------------------------- create new PMM token and add it to the secret ----------------------------------------------------------------------------------- ++ get_pmm_server_token operator_new ++ local key_name=operator_new ++ [[ -z operator_new ]] ++ local ADMIN_PASSWORD +++ kubectl get secret pmm-secret -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' +++ base64 --decode ++ ADMIN_PASSWORD='iKK^Hf^sD^bE~5P8' ++ [[ -z iKK^Hf^sD^bE~5P8 ]] ++ local create_response create_status_code create_json_response ++ local retry=0 ++ [[ '' == 201 ]] ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.bBB1mGcA9v ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.mlD800lC7X +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.bBB1mGcA9v +++++ cat /tmp/tmp.mlD800lC7X +++++ rm /tmp/tmp.bBB1mGcA9v /tmp/tmp.mlD800lC7X +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.hF23iLJGt7 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.hGa5s0eDfs ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.hF23iLJGt7 ++++ cat /tmp/tmp.hGa5s0eDfs ++++ rm /tmp/tmp.hF23iLJGt7 /tmp/tmp.hGa5s0eDfs ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.onqq0i0En2 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.5stWRr5H9i ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.onqq0i0En2 ++++ cat /tmp/tmp.5stWRr5H9i ++++ rm /tmp/tmp.onqq0i0En2 /tmp/tmp.5stWRr5H9i ++++ return 0 +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -d '{"name":"operator_new", 
"role":"Admin", "isDisabled":false}' --user 'admin:iKK^Hf^sD^bE~5P8' https://35.184.158.12/graph/api/serviceaccounts -w '\n%{http_code}' ++ create_response='{"id":3,"uid":"effoko9lv2m80d","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ echo '{"id":3,"uid":"effoko9lv2m80d","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ tail -n1 ++ create_status_code=201 +++ echo '{"id":3,"uid":"effoko9lv2m80d","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} 201' +++ sed '$ d' ++ create_json_response='{"id":3,"uid":"effoko9lv2m80d","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' ++ sleep 5 ++ let retry+=1 ++ '[' 1 -ge 24 ']' ++ [[ 201 == 201 ]] ++ local service_account_id +++ echo '{"id":3,"uid":"effoko9lv2m80d","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' +++ jq -r .id ++ service_account_id=3 ++ [[ -z 3 ]] ++ [[ 3 == \n\u\l\l ]] ++ local token_response token_status_code token_json_response ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.JKwOrco76V ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.dXZWFW4pPt +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.JKwOrco76V +++++ cat /tmp/tmp.dXZWFW4pPt +++++ rm /tmp/tmp.JKwOrco76V /tmp/tmp.dXZWFW4pPt +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.91LC4OJgfw +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.gcekDJqeRn ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.91LC4OJgfw ++++ cat /tmp/tmp.gcekDJqeRn ++++ rm /tmp/tmp.91LC4OJgfw /tmp/tmp.gcekDJqeRn ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.yV2aPzPBRv +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.eaK3xRRD5R ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.yV2aPzPBRv ++++ cat /tmp/tmp.eaK3xRRD5R ++++ rm /tmp/tmp.yV2aPzPBRv /tmp/tmp.eaK3xRRD5R ++++ return 0 +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -d '{"name":"operator_new"}' --user 'admin:iKK^Hf^sD^bE~5P8' https://35.184.158.12/graph/api/serviceaccounts/3/tokens -w '\n%{http_code}' ++ 
token_response='{"id":2,"name":"operator_new","key":"glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43"} 200' +++ echo '{"id":2,"name":"operator_new","key":"glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43"} 200' +++ tail -n1 ++ token_status_code=200 +++ echo '{"id":2,"name":"operator_new","key":"glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43"} 200' +++ sed '$ d' ++ token_json_response='{"id":2,"name":"operator_new","key":"glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43"}' ++ [[ 200 -ne 200 ]] ++ jq -r .key ++ echo '{"id":2,"name":"operator_new","key":"glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43"}' + NEW_TOKEN=glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43 + kubectl_bin patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmservertoken": "glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43"}}' ++ mktemp + local LAST_OUT=/tmp/tmp.yCr17zc8vP ++ mktemp + local LAST_ERR=/tmp/tmp.JMOUqZCiYA + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmservertoken": "glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43"}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.yCr17zc8vP secret/my-cluster-secrets patched + cat /tmp/tmp.JMOUqZCiYA + rm /tmp/tmp.yCr17zc8vP /tmp/tmp.JMOUqZCiYA + return 0 + desc 'delete old PMM token' + set +o xtrace ----------------------------------------------------------------------------------- delete old PMM token ----------------------------------------------------------------------------------- + delete_pmm_server_token operator + local key_name=operator + [[ -z operator ]] + local ADMIN_PASSWORD ++ base64 --decode ++ kubectl get secret pmm-secret -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' + ADMIN_PASSWORD='iKK^Hf^sD^bE~5P8' + [[ -z iKK^Hf^sD^bE~5P8 ]] + local 'user_credentials=admin:iKK^Hf^sD^bE~5P8' + local service_accounts_response service_accounts_status +++ get_service_ip monitoring-service +++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.76E0x3nakN +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.54HocHhd6m ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.76E0x3nakN ++++ cat /tmp/tmp.54HocHhd6m ++++ rm /tmp/tmp.76E0x3nakN /tmp/tmp.54HocHhd6m ++++ return 0 +++ '[' LoadBalancer = ClusterIP ']' +++ grep -E -q 'hostname|ip' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ZvPgcR2Xif ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MgK0xFiF4J +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.ZvPgcR2Xif +++ cat /tmp/tmp.MgK0xFiF4J +++ rm /tmp/tmp.ZvPgcR2Xif /tmp/tmp.MgK0xFiF4J +++ return 0 +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Mw9NbqBhc4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.UytYnK1qT2 +++ local exit_status=0 ++++ seq 0 2 
+++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.Mw9NbqBhc4 +++ cat /tmp/tmp.UytYnK1qT2 +++ rm /tmp/tmp.Mw9NbqBhc4 /tmp/tmp.UytYnK1qT2 +++ return 0 ++ curl --insecure -s -X GET --user 'admin:iKK^Hf^sD^bE~5P8' https://35.184.158.12/graph/api/serviceaccounts/search -w '\n%{http_code}' + service_accounts_response='{"totalCount":2,"serviceAccounts":[{"id":2,"uid":"fffok09imkagwa","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"uid":"effoko9lv2m80d","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000} 200' ++ echo '{"totalCount":2,"serviceAccounts":[{"id":2,"uid":"fffok09imkagwa","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"uid":"effoko9lv2m80d","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000} 200' ++ tail -n1 + service_accounts_status=200 ++ echo '{"totalCount":2,"serviceAccounts":[{"id":2,"uid":"fffok09imkagwa","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"uid":"effoko9lv2m80d","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000} 200' ++ sed '$ d' + service_accounts_json='{"totalCount":2,"serviceAccounts":[{"id":2,"uid":"fffok09imkagwa","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"uid":"effoko9lv2m80d","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000}' + [[ 200 -ne 200 ]] + local service_account_id ++ echo '{"totalCount":2,"serviceAccounts":[{"id":2,"uid":"fffok09imkagwa","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"uid":"effoko9lv2m80d","name":"operator_new","login":"sa-1-operator_new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000}' ++ jq -r '.serviceAccounts[] | select(.name == "operator").id' + service_account_id=2 + [[ -z 2 ]] + [[ 2 == \n\u\l\l ]] + local tokens_response tokens_status tokens_json +++ get_service_ip monitoring-service +++ local service=monitoring-service +++ grep -q NotFound +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.TcC5LnUan9 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.UIWWldf2ua ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 
'jsonpath={.spec.type}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.TcC5LnUan9 ++++ cat /tmp/tmp.UIWWldf2ua ++++ rm /tmp/tmp.TcC5LnUan9 /tmp/tmp.UIWWldf2ua ++++ return 0 +++ '[' LoadBalancer = ClusterIP ']' +++ grep -E -q 'hostname|ip' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.kEv1d2fLww ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TrjDbDmLV7 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.kEv1d2fLww +++ cat /tmp/tmp.TrjDbDmLV7 +++ rm /tmp/tmp.kEv1d2fLww /tmp/tmp.TrjDbDmLV7 +++ return 0 +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.z8XlNi5I9K ++++ mktemp +++ local LAST_ERR=/tmp/tmp.j6EvOrqow7 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.z8XlNi5I9K +++ cat /tmp/tmp.j6EvOrqow7 +++ rm /tmp/tmp.z8XlNi5I9K /tmp/tmp.j6EvOrqow7 +++ return 0 ++ curl --insecure -s -X GET --user 'admin:iKK^Hf^sD^bE~5P8' https://35.184.158.12/graph/api/serviceaccounts/2/tokens -w '\n%{http_code}' + tokens_response='[{"id":1,"name":"operator","created":"2026-03-11T09:21:47Z","lastUsedAt":"2026-03-11T09:27:01Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}] 200' ++ echo '[{"id":1,"name":"operator","created":"2026-03-11T09:21:47Z","lastUsedAt":"2026-03-11T09:27:01Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}] 200' ++ tail -n1 + tokens_status=200 ++ echo '[{"id":1,"name":"operator","created":"2026-03-11T09:21:47Z","lastUsedAt":"2026-03-11T09:27:01Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}] 200' ++ sed '$ d' + tokens_json='[{"id":1,"name":"operator","created":"2026-03-11T09:21:47Z","lastUsedAt":"2026-03-11T09:27:01Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}]' + [[ 200 -ne 200 ]] + local token_id ++ echo '[{"id":1,"name":"operator","created":"2026-03-11T09:21:47Z","lastUsedAt":"2026-03-11T09:27:01Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}]' ++ jq -r '.[] | select(.name == "operator").id' + token_id=1 + [[ -z 1 ]] + [[ 1 == \n\u\l\l ]] + local delete_response delete_status +++ get_service_ip monitoring-service +++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.8gfQudy5wt +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.FcXr98yAdm ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.8gfQudy5wt ++++ cat /tmp/tmp.FcXr98yAdm ++++ rm /tmp/tmp.8gfQudy5wt /tmp/tmp.FcXr98yAdm ++++ return 0 +++ '[' LoadBalancer = ClusterIP ']' +++ grep -E -q 'hostname|ip' +++ 
kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.l8d7CZg0Xc ++++ mktemp +++ local LAST_ERR=/tmp/tmp.wUyudCyxQ2 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.l8d7CZg0Xc +++ cat /tmp/tmp.wUyudCyxQ2 +++ rm /tmp/tmp.l8d7CZg0Xc /tmp/tmp.wUyudCyxQ2 +++ return 0 +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.x5EKFOCM4e ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mXyqJDWhzp +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.x5EKFOCM4e +++ cat /tmp/tmp.mXyqJDWhzp +++ rm /tmp/tmp.x5EKFOCM4e /tmp/tmp.mXyqJDWhzp +++ return 0 ++ curl --insecure -s -X DELETE --user 'admin:iKK^Hf^sD^bE~5P8' https://35.184.158.12/graph/api/serviceaccounts/2/tokens/1 -w '\n%{http_code}' + delete_response='{"message":"Service account token deleted"} 200' ++ echo '{"message":"Service account token deleted"} 200' ++ tail -n1 + delete_status=200 + [[ 200 -ne 200 ]]
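Stripped of the retry scaffolding, the token rotation just completed against the PMM (Grafana) API boils down to four calls plus a secret patch. A condensed sketch using the endpoints and names seen in the trace (PMM_HOST is an assumed variable holding the monitoring-service LoadBalancer address; error handling and retries omitted):

    # Assumed: PMM_HOST points at the monitoring-service LoadBalancer endpoint from this run.
    PMM_HOST="https://35.184.158.12"
    ADMIN_PASSWORD=$(kubectl get secret pmm-secret -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' | base64 --decode)
    # Create a new Admin service account and mint a token for it.
    sa_id=$(curl -sk --user "admin:${ADMIN_PASSWORD}" -X POST -H 'Content-Type: application/json' \
        -d '{"name":"operator_new","role":"Admin","isDisabled":false}' \
        "${PMM_HOST}/graph/api/serviceaccounts" | jq -r .id)
    new_token=$(curl -sk --user "admin:${ADMIN_PASSWORD}" -X POST -H 'Content-Type: application/json' \
        -d '{"name":"operator_new"}' "${PMM_HOST}/graph/api/serviceaccounts/${sa_id}/tokens" | jq -r .key)
    # Hand the fresh token to the operator through the cluster secret.
    kubectl patch secret my-cluster-secrets --type merge \
        --patch "{\"stringData\": {\"pmmservertoken\": \"${new_token}\"}}"
    # Look up and revoke the old token so only the new one stays valid.
    old_sa_id=$(curl -sk --user "admin:${ADMIN_PASSWORD}" "${PMM_HOST}/graph/api/serviceaccounts/search" \
        | jq -r '.serviceAccounts[] | select(.name == "operator").id')
    old_token_id=$(curl -sk --user "admin:${ADMIN_PASSWORD}" "${PMM_HOST}/graph/api/serviceaccounts/${old_sa_id}/tokens" \
        | jq -r '.[] | select(.name == "operator").id')
    curl -sk --user "admin:${ADMIN_PASSWORD}" -X DELETE "${PMM_HOST}/graph/api/serviceaccounts/${old_sa_id}/tokens/${old_token_id}"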
+ wait_for_generation sts/monitoring-pxc 3 + local resource=sts/monitoring-pxc + local target_generation=3 + echo 'Waiting for sts/monitoring-pxc to reach generation 3...' Waiting for sts/monitoring-pxc to reach generation 3... + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 [identical 5-second poll iterations elided until the generation changed] + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 3 ']' + echo 'Resource sts/monitoring-pxc has reached generation 3.' Resource sts/monitoring-pxc has reached generation 3. + break + wait_for_generation sts/monitoring-haproxy 3 + local resource=sts/monitoring-haproxy + local target_generation=3 + echo 'Waiting for sts/monitoring-haproxy to reach generation 3...'
Waiting for sts/monitoring-haproxy to reach generation 3... + true ++ kubectl get sts/monitoring-haproxy -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 3 ']' + echo 'Resource sts/monitoring-haproxy has reached generation 3.' Resource sts/monitoring-haproxy has reached generation 3. + break + sleep 10 + kubectl wait pod -l app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --for=condition=ready --timeout=600s pod/monitoring-haproxy-0 condition met pod/monitoring-haproxy-1 condition met pod/monitoring-pxc-0 condition met pod/monitoring-pxc-1 condition met pod/monitoring-pxc-2 condition met + desc 'check if pmm-client container enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-pxc + local resource=statefulset/monitoring-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc.yml + local new_result=/tmp/tmp.TyCF3e9CRW/statefulset_monitoring-pxc.yml + desc 'compare statefulset/monitoring-pxc-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-pxc- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 ++ echo '1.32 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 ++ echo '1.32 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-k129.yml ']' + version_gt 1.27 ++ echo '1.32 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-k127-oc.yml ']' + version_gt 1.29 ++ echo '1.32 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-pxc ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") | del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. 
== "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-pmm3-7039", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.QXUfnGG3Vm ++ mktemp + local LAST_ERR=/tmp/tmp.jQSfLWIOgr + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.QXUfnGG3Vm + cat /tmp/tmp.jQSfLWIOgr + rm /tmp/tmp.QXUfnGG3Vm /tmp/tmp.jQSfLWIOgr + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-pxc-k127.yml /tmp/tmp.TyCF3e9CRW/statefulset_monitoring-pxc.yml + log 'compare_kubectl: statefulset/monitoring-pxc OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2026-03-11T09:32:28+0000]' compare_kubectl: statefulset/monitoring-pxc OK [2026-03-11T09:32:28+0000] compare_kubectl: statefulset/monitoring-pxc OK + compare_kubectl statefulset/monitoring-haproxy + local resource=statefulset/monitoring-haproxy + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy.yml + local new_result=/tmp/tmp.TyCF3e9CRW/statefulset_monitoring-haproxy.yml + desc 'compare statefulset/monitoring-haproxy-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-haproxy- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 ++ echo '1.32 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 ++ echo '1.32 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-k129.yml ']' + version_gt 1.27 ++ echo '1.32 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-k127-oc.yml ']' + version_gt 1.29 ++ echo '1.32 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-haproxy ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") | del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. 
| select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-pmm3-7039", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.SjKKvFHYC5 ++ mktemp + local LAST_ERR=/tmp/tmp.MBiekF3NDp + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.SjKKvFHYC5 + cat /tmp/tmp.MBiekF3NDp + rm /tmp/tmp.SjKKvFHYC5 /tmp/tmp.MBiekF3NDp + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2384/e2e-tests/monitoring-pmm3/compare/statefulset_monitoring-haproxy-k127.yml /tmp/tmp.TyCF3e9CRW/statefulset_monitoring-haproxy.yml + log 'compare_kubectl: statefulset/monitoring-haproxy OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2026-03-11T09:32:29+0000]' compare_kubectl: statefulset/monitoring-haproxy OK [2026-03-11T09:32:29+0000] compare_kubectl: statefulset/monitoring-haproxy OK + desc 'check mysql metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mysql metrics ----------------------------------------------------------------------------------- + sleep 60 + get_metric_values_pmm3 node_boot_time_seconds pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0 glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43 + local metric=node_boot_time_seconds + local instance=pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0 + local token=glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221549 ++ /usr/bin/date -u +%s + local end=1773221609 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.dL7eljXkx4 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.TtYmVDtw72 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.dL7eljXkx4 +++ cat /tmp/tmp.TtYmVDtw72 +++ rm /tmp/tmp.dL7eljXkx4 /tmp/tmp.TtYmVDtw72 +++ return 0 ++ local endpoint= ++ '[' -z '' ']' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.BVsZZdHrpg ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KxUEv9Zgak +++ 
local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.BVsZZdHrpg +++ cat /tmp/tmp.KxUEv9Zgak +++ rm /tmp/tmp.BVsZZdHrpg /tmp/tmp.KxUEv9Zgak +++ return 0 ++ endpoint=35.184.158.12 ++ '[' -n 35.184.158.12 ']' ++ '[' 35.184.158.12 '!=' null ']' ++ echo 35.184.158.12 ++ head -n 1 ++ sed -e 's/^"//; s/"$//;' ++ return + local endpoint=35.184.158.12 + '[' -z node_boot_time_seconds ']' + '[' -z glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43 ']' + local wait_count=30 + local retry=0 ++ jq '.data.result[0].values[][1]' ++ curl -s -k -H 'Authorization: Bearer glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43' 'https://35.184.158.12/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7D%29&start=1773221549&end=1773221609&step=60' ++ grep '^"[0-9]' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221553 ++ /usr/bin/date -u +%s + local end=1773221613 + let retry+=1 + [[ 1 -ge 30 ]] ++ curl -s -k -H 'Authorization: Bearer glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43' 'https://35.184.158.12/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7D%29&start=1773221553&end=1773221613&step=60' ++ jq '.data.result[0].values[][1]' ++ grep '^"[0-9]' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221556 ++ /usr/bin/date -u +%s + local end=1773221616 + let retry+=1 + [[ 2 -ge 30 ]] ++ curl -s -k -H 'Authorization: Bearer glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43' 'https://35.184.158.12/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7D%29&start=1773221556&end=1773221616&step=60' ++ jq '.data.result[0].values[][1]' ++ grep '^"[0-9]' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221558 ++ /usr/bin/date -u +%s + local end=1773221618 + let retry+=1 + [[ 3 -ge 30 ]] ++ curl -s -k -H 'Authorization: Bearer glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43' 'https://35.184.158.12/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7D%29&start=1773221558&end=1773221618&step=60' ++ jq '.data.result[0].values[][1]' ++ grep '^"[0-9]' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221561 ++ /usr/bin/date -u +%s + local end=1773221621 + let retry+=1 + [[ 4 -ge 30 ]] ++ curl -s -k -H 'Authorization: Bearer glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43' 
'https://35.184.158.12/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7D%29&start=1773221561&end=1773221621&step=60' ++ grep '^"[0-9]' ++ jq '.data.result[0].values[][1]' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221563 ++ /usr/bin/date -u +%s + local end=1773221623 + let retry+=1 + [[ 5 -ge 30 ]] ++ curl -s -k -H 'Authorization: Bearer glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43' 'https://35.184.158.12/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7D%29&start=1773221563&end=1773221623&step=60' ++ grep '^"[0-9]' ++ jq '.data.result[0].values[][1]' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221566 ++ /usr/bin/date -u +%s + local end=1773221626 + let retry+=1 + [[ 6 -ge 30 ]] ++ curl -s -k -H 'Authorization: Bearer glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43' 'https://35.184.158.12/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7D%29&start=1773221566&end=1773221626&step=60' ++ jq '.data.result[0].values[][1]' ++ grep '^"[0-9]' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221568 ++ /usr/bin/date -u +%s + local end=1773221628 + let retry+=1 + [[ 7 -ge 30 ]] ++ jq '.data.result[0].values[][1]' ++ curl -s -k -H 'Authorization: Bearer glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43' 'https://35.184.158.12/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7D%29&start=1773221568&end=1773221628&step=60' ++ grep '^"[0-9]' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221570 ++ /usr/bin/date -u +%s + local end=1773221630 + let retry+=1 + [[ 8 -ge 30 ]] ++ curl -s -k -H 'Authorization: Bearer glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43' 'https://35.184.158.12/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7D%29&start=1773221570&end=1773221630&step=60' ++ grep '^"[0-9]' ++ jq '.data.result[0].values[][1]' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221573 ++ /usr/bin/date -u +%s + local end=1773221633 + let retry+=1 + [[ 9 -ge 30 ]] ++ curl -s -k -H 'Authorization: Bearer glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43' 
'https://35.184.158.12/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7D%29&start=1773221573&end=1773221633&step=60' ++ grep '^"[0-9]' ++ jq '.data.result[0].values[][1]' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221575 ++ /usr/bin/date -u +%s + local end=1773221635 + let retry+=1 + [[ 10 -ge 30 ]] ++ jq '.data.result[0].values[][1]' ++ curl -s -k -H 'Authorization: Bearer glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43' 'https://35.184.158.12/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7D%29&start=1773221575&end=1773221635&step=60' ++ grep '^"[0-9]' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221578 ++ /usr/bin/date -u +%s + local end=1773221638 + let retry+=1 + [[ 11 -ge 30 ]] ++ curl -s -k -H 'Authorization: Bearer glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43' 'https://35.184.158.12/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7D%29&start=1773221578&end=1773221638&step=60' ++ jq '.data.result[0].values[][1]' ++ grep '^"[0-9]' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221580 ++ /usr/bin/date -u +%s + local end=1773221640 + let retry+=1 + [[ 12 -ge 30 ]] ++ jq '.data.result[0].values[][1]' ++ curl -s -k -H 'Authorization: Bearer glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43' 'https://35.184.158.12/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7D%29&start=1773221580&end=1773221640&step=60' ++ grep '^"[0-9]' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221583 ++ /usr/bin/date -u +%s + local end=1773221643 + let retry+=1 + [[ 13 -ge 30 ]] ++ curl -s -k -H 'Authorization: Bearer glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43' 'https://35.184.158.12/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7D%29&start=1773221583&end=1773221643&step=60' ++ jq '.data.result[0].values[][1]' ++ grep '^"[0-9]' + [[ -n "1773214602" ]] + get_metric_values_pmm3 mysql_global_status_uptime pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0 glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43 + local metric=mysql_global_status_uptime + local instance=pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0 + local token=glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43 ++ /usr/bin/date -u +%s -d '-1 minute' + local 
start=1773221583 ++ /usr/bin/date -u +%s + local end=1773221643 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Pzs0o783LE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Dc4QjbItIK +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.Pzs0o783LE +++ cat /tmp/tmp.Dc4QjbItIK +++ rm /tmp/tmp.Pzs0o783LE /tmp/tmp.Dc4QjbItIK +++ return 0 ++ local endpoint= ++ '[' -z '' ']' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.xoYCTpDkC6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.anyffyRRV8 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.xoYCTpDkC6 +++ cat /tmp/tmp.anyffyRRV8 +++ rm /tmp/tmp.xoYCTpDkC6 /tmp/tmp.anyffyRRV8 +++ return 0 ++ endpoint=35.184.158.12 ++ '[' -n 35.184.158.12 ']' ++ '[' 35.184.158.12 '!=' null ']' ++ echo 35.184.158.12 ++ sed -e 's/^"//; s/"$//;' ++ head -n 1 ++ return + local endpoint=35.184.158.12 + '[' -z mysql_global_status_uptime ']' + '[' -z glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43 ']' + local wait_count=30 + local retry=0 ++ jq '.data.result[0].values[][1]' ++ curl -s -k -H 'Authorization: Bearer glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43' 'https://35.184.158.12/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7d%20or%20mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7D%29&start=1773221583&end=1773221643&step=60' ++ grep '^"[0-9]' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221588 ++ /usr/bin/date -u +%s + local end=1773221648 + let retry+=1 + [[ 1 -ge 30 ]] ++ curl -s -k -H 'Authorization: Bearer glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43' 'https://35.184.158.12/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7d%20or%20mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7D%29&start=1773221588&end=1773221648&step=60' ++ grep '^"[0-9]' ++ jq '.data.result[0].values[][1]' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221591 ++ /usr/bin/date -u +%s + local end=1773221651 + let retry+=1 + [[ 2 -ge 30 ]] ++ curl -s -k -H 'Authorization: Bearer glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43' 'https://35.184.158.12/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7d%20or%20mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7D%29&start=1773221591&end=1773221651&step=60' ++ grep '^"[0-9]' ++ jq '.data.result[0].values[][1]' jq: error (at :0): Cannot iterate over null (null) + [[ -n '' ]] + 
sleep 2 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221593 ++ /usr/bin/date -u +%s + local end=1773221653 + let retry+=1 + [[ 3 -ge 30 ]] ++ curl -s -k -H 'Authorization: Bearer glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43' 'https://35.184.158.12/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7d%20or%20mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-pxc-0%22%7D%29&start=1773221593&end=1773221653&step=60' ++ grep '^"[0-9]' ++ jq '.data.result[0].values[][1]' + [[ -n "13" ]] + desc 'check haproxy metrics' + set +o xtrace ----------------------------------------------------------------------------------- check haproxy metrics ----------------------------------------------------------------------------------- + get_metric_values_pmm3 haproxy_backend_status pxc-prefix-monitoring-pmm3-7039-monitoring-haproxy-0 glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43 + local metric=haproxy_backend_status + local instance=pxc-prefix-monitoring-pmm3-7039-monitoring-haproxy-0 + local token=glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221593 ++ /usr/bin/date -u +%s + local end=1773221653 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.8LQggSmQzK ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9dbSpeMMg6 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.8LQggSmQzK +++ cat /tmp/tmp.9dbSpeMMg6 +++ rm /tmp/tmp.8LQggSmQzK /tmp/tmp.9dbSpeMMg6 +++ return 0 ++ local endpoint= ++ '[' -z '' ']' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.5x4N8guaYM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.5mIDronyVh +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.5x4N8guaYM +++ cat /tmp/tmp.5mIDronyVh +++ rm /tmp/tmp.5x4N8guaYM /tmp/tmp.5mIDronyVh +++ return 0 ++ endpoint=35.184.158.12 ++ '[' -n 35.184.158.12 ']' ++ '[' 35.184.158.12 '!=' null ']' ++ echo 35.184.158.12 ++ head -n 1 ++ sed -e 's/^"//; s/"$//;' ++ return + local endpoint=35.184.158.12 + '[' -z haproxy_backend_status ']' + '[' -z glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43 ']' + local wait_count=30 + local retry=0 ++ jq '.data.result[0].values[][1]' ++ curl -s -k -H 'Authorization: Bearer glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43' 'https://35.184.158.12/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28haproxy_backend_status%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-haproxy-0%22%7d%20or%20haproxy_backend_status%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-haproxy-0%22%7D%29&start=1773221593&end=1773221653&step=60' ++ grep '^"[0-9]' + [[ -n "0" "0" ]] + get_metric_values_pmm3 haproxy_backend_active_servers pxc-prefix-monitoring-pmm3-7039-monitoring-haproxy-0 glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43 + local metric=haproxy_backend_active_servers + 
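Each get_metric_values_pmm3 call in this trace is the same bounded retry loop: recompute a one-minute time window, query the Grafana datasource proxy with a service-account token, and accept the first numeric sample. A condensed sketch under the same semantics, with the endpoint and token as placeholders (the real values come from get_service_endpoint and the test's token) and the doubled "or" term from the trace's query dropped:

    endpoint=35.184.158.12            # from get_service_endpoint above
    token='glsa_REDACTED'             # Grafana service-account token (placeholder)
    metric=haproxy_backend_active_servers
    instance=pxc-prefix-monitoring-pmm3-7039-monitoring-haproxy-0
    query="min%28${metric}%7Bnode_name%3D%7E%22${instance}%22%7D%29"
    retry=0
    while :; do
        start=$(/usr/bin/date -u +%s -d '-1 minute')
        end=$(/usr/bin/date -u +%s)
        value=$(curl -s -k -H "Authorization: Bearer ${token}" \
            "https://${endpoint}/graph/api/datasources/proxy/1/api/v1/query_range?query=${query}&start=${start}&end=${end}&step=60" \
            | jq '.data.result[0].values[][1]' 2>/dev/null | grep '^"[0-9]')
        # (the trace leaves jq's "Cannot iterate over null" on stderr instead of discarding it)
        [ -n "$value" ] && break      # first numeric sample wins
        retry=$((retry + 1))
        [ "$retry" -ge 30 ] && exit 1 # same 30-attempt bound as wait_count in the trace
        sleep 2
    done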
local instance=pxc-prefix-monitoring-pmm3-7039-monitoring-haproxy-0 + local token=glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43 ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1773221596 ++ /usr/bin/date -u +%s + local end=1773221656 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.PVA3ClselT ++++ mktemp +++ local LAST_ERR=/tmp/tmp.KaRypbRqFw +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.PVA3ClselT +++ cat /tmp/tmp.KaRypbRqFw +++ rm /tmp/tmp.PVA3ClselT /tmp/tmp.KaRypbRqFw +++ return 0 ++ local endpoint= ++ '[' -z '' ']' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.d7ogPodkjp ++++ mktemp +++ local LAST_ERR=/tmp/tmp.i3Mtnk8mgb +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.d7ogPodkjp +++ cat /tmp/tmp.i3Mtnk8mgb +++ rm /tmp/tmp.d7ogPodkjp /tmp/tmp.i3Mtnk8mgb +++ return 0 ++ endpoint=35.184.158.12 ++ '[' -n 35.184.158.12 ']' ++ '[' 35.184.158.12 '!=' null ']' ++ head -n 1 ++ echo 35.184.158.12 ++ sed -e 's/^"//; s/"$//;' ++ return + local endpoint=35.184.158.12 + '[' -z haproxy_backend_active_servers ']' + '[' -z glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43 ']' + local wait_count=30 + local retry=0 ++ curl -s -k -H 'Authorization: Bearer glsa_uaBu4ckdKOsqkUXBOzZUv6rTvz4bjwv4_25f50b43' 'https://35.184.158.12/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28haproxy_backend_active_servers%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-haproxy-0%22%7d%20or%20haproxy_backend_active_servers%7Bnode_name%3D%7E%22pxc-prefix-monitoring-pmm3-7039-monitoring-haproxy-0%22%7D%29&start=1773221596&end=1773221656&step=60' ++ grep '^"[0-9]' ++ jq '.data.result[0].values[][1]' + [[ -n "1" "0" ]] + desc 'switch from haproxy to proxysql' + set +o xtrace ----------------------------------------------------------------------------------- switch from haproxy to proxysql ----------------------------------------------------------------------------------- + kubectl_bin patch pxc monitoring --type=json -p '[ {"op": "replace", "path": "/spec/haproxy/enabled", "value": false}, {"op": "replace", "path": "/spec/proxysql/enabled", "value": true} ]' ++ mktemp + local LAST_OUT=/tmp/tmp.gvk7NoWcpp ++ mktemp + local LAST_ERR=/tmp/tmp.wXv4abgkvG + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc monitoring --type=json -p '[ {"op": "replace", "path": "/spec/haproxy/enabled", "value": false}, {"op": "replace", "path": "/spec/proxysql/enabled", "value": true} ]' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.gvk7NoWcpp perconaxtradbcluster.pxc.percona.com/monitoring patched + cat /tmp/tmp.wXv4abgkvG + rm /tmp/tmp.gvk7NoWcpp /tmp/tmp.wXv4abgkvG + return 0 + wait_for_delete sts/monitoring-haproxy + local res=sts/monitoring-haproxy + echo -n 'waiting for sts/monitoring-haproxy to be deleted' waiting for sts/monitoring-haproxy to be deleted+ set +o xtrace 
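The proxy switch performed above is a single JSON patch against the PerconaXtraDBCluster custom resource; in standalone form it is equivalent to the kubectl_bin invocation in the trace:

    kubectl patch pxc monitoring --type=json -p '[
      {"op": "replace", "path": "/spec/haproxy/enabled",  "value": false},
      {"op": "replace", "path": "/spec/proxysql/enabled", "value": true}
    ]'

Once haproxy is disabled the operator tears down the monitoring-haproxy StatefulSet, which is what the wait_for_delete loop (the run of dots that follows) is polling for while the operator log below takes over.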
........................................................................................................................2026-03-11T09:19:33.549Z INFO setup Runs on {"platform": "kubernetes", "version": "v1.32.12-gke.1127000"} 2026-03-11T09:19:33.549Z INFO setup Manager starting up {"gitCommit": "7f4bfbf44130eef78e7b2b7137fa04bd4427267a", "gitBranch": "PR-2384-7f4bfbf4", "buildTime": "2026-03-11T07:27:21Z", "goVersion": "go1.25.8", "os": "linux", "arch": "amd64"} 2026-03-11T09:19:33.549Z INFO setup Feature gates {"PXCO_FEATURE_GATES": "", "enabled": ""} 2026-03-11T09:19:33.552Z INFO setup Registering Components. 2026-03-11T09:19:35.044Z INFO controller-runtime.webhook Registering webhook {"path": "/validate-percona-xtradbcluster"} 2026-03-11T09:19:35.045Z INFO setup Starting the Cmd. 2026-03-11T09:19:35.045Z INFO controller-runtime.metrics Starting metrics server 2026-03-11T09:19:35.045Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":8080", "secure": false} 2026-03-11T09:19:35.045Z INFO starting server {"name": "health probe", "addr": "[::]:8081"} 2026-03-11T09:19:35.045Z INFO controller-runtime.webhook Starting webhook server 2026-03-11T09:19:35.045Z INFO controller-runtime.certwatcher Updated current TLS certificate {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key"} 2026-03-11T09:19:35.045Z INFO controller-runtime.webhook Serving webhook server {"host": "", "port": 9443} 2026-03-11T09:19:35.045Z INFO controller-runtime.certwatcher Starting certificate poll+watcher {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key", "interval": "10s"} 2026-03-11T09:19:35.147Z INFO Attempting to acquire leader lease... 
{"lock": "pxc-operator/08db1feb.percona.com"} 2026-03-11T09:19:35.297Z INFO Successfully acquired lease {"lock": "pxc-operator/08db1feb.percona.com"} 2026-03-11T09:19:35.298Z DEBUG events percona-xtradb-cluster-operator-944bd69c8-9g8wj_d2983e50-fe67-44e3-8212-42c693eb6137 became leader {"type": "Normal", "object": {"kind":"Lease","namespace":"pxc-operator","name":"08db1feb.percona.com","uid":"6fdf6b28-763f-492d-ab6c-6b26c8446896","apiVersion":"coordination.k8s.io/v1","resourceVersion":"1773220775286335009"}, "reason": "LeaderElection"} 2026-03-11T09:19:35.298Z INFO Starting EventSource {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "source": "kind source: *v1.Secret"} 2026-03-11T09:19:35.298Z INFO Starting EventSource {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "source": "kind source: *v1.PerconaXtraDBClusterRestore"} 2026-03-11T09:19:35.298Z INFO Starting EventSource {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "source": "kind source: *v1.PerconaXtraDBClusterBackup"} 2026-03-11T09:19:35.298Z INFO Starting EventSource {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "source": "kind source: *v1.PerconaXtraDBCluster"} 2026-03-11T09:19:35.399Z INFO Starting Controller {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup"} 2026-03-11T09:19:35.399Z INFO Starting workers {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "worker count": 1} 2026-03-11T09:19:35.399Z INFO Starting Controller {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore"} 2026-03-11T09:19:35.399Z INFO Starting workers {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "worker count": 1} 2026-03-11T09:19:35.399Z INFO Starting Controller {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster"} 2026-03-11T09:19:35.399Z INFO Starting workers {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "worker count": 1} 2026-03-11T09:21:51.524Z INFO Set CR version {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "9bd6ed8a-2fa3-49f7-83f2-ad86df3681cf", "version": "1.20.0"} 2026-03-11T09:21:53.357Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "9bd6ed8a-2fa3-49f7-83f2-ad86df3681cf", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"} 2026-03-11T09:21:53.412Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": 
{"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "9bd6ed8a-2fa3-49f7-83f2-ad86df3681cf", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"} 2026-03-11T09:21:53.468Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "9bd6ed8a-2fa3-49f7-83f2-ad86df3681cf", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-03-11T09:21:53.542Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "9bd6ed8a-2fa3-49f7-83f2-ad86df3681cf", "object": "monitoring-pxc-unready", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-03-11T09:21:53.582Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "9bd6ed8a-2fa3-49f7-83f2-ad86df3681cf", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-03-11T09:21:53.682Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "9bd6ed8a-2fa3-49f7-83f2-ad86df3681cf", "object": "monitoring-haproxy-replicas", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2026-03-11T09:21:54.568Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "d31e919a-76a4-401b-ba3c-b1ce3b9c35f2", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2026-03-11T09:21:54.591Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "d31e919a-76a4-401b-ba3c-b1ce3b9c35f2", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2026-03-11T09:22:45.240Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "36b40102-6726-4e6a-bf58-da5f6e8c0219", "user": "operator"} 2026-03-11T09:22:45.274Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": 
{"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "36b40102-6726-4e6a-bf58-da5f6e8c0219", "user": "monitor"} 2026-03-11T09:22:45.325Z INFO User monitor: granted privileges {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "36b40102-6726-4e6a-bf58-da5f6e8c0219"} 2026-03-11T09:22:45.357Z INFO monitor user privileges granted {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "36b40102-6726-4e6a-bf58-da5f6e8c0219"} 2026-03-11T09:22:45.387Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "36b40102-6726-4e6a-bf58-da5f6e8c0219", "user": "xtrabackup"} 2026-03-11T09:22:45.430Z INFO User xtrabackup: granted privileges {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "36b40102-6726-4e6a-bf58-da5f6e8c0219"} 2026-03-11T09:22:45.466Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "36b40102-6726-4e6a-bf58-da5f6e8c0219", "user": "replication"} 2026-03-11T09:25:23.485Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "d7a1cedb-b3cc-4c81-9bb7-e6ca011549b6", "user": "root"} 2026-03-11T09:25:23.565Z INFO update PXC version (fetched from db) {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "d7a1cedb-b3cc-4c81-9bb7-e6ca011549b6", "new version": "8.0.43-34.1"} 2026-03-11T09:28:55.702Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "20ac3db6-6f4e-4bd7-824e-fd617bee125e", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} &v1.StatefulSet{ TypeMeta: {Kind: "StatefulSet", APIVersion: "apps/v1"}, ObjectMeta: v1.ObjectMeta{ ... 
// 2 identical fields Namespace: "monitoring-pmm3-7039", SelfLink: "", - UID: "aed0468f-2ce6-4057-91bb-826d632d16b7", + UID: "", - ResourceVersion: "1773221119492543005", + ResourceVersion: "", - Generation: 1, + Generation: 0, - CreationTimestamp: v1.Time{Time: s"2026-03-11 09:21:53 +0000 UTC"}, + CreationTimestamp: v1.Time{}, DeletionTimestamp: nil, DeletionGracePeriodSeconds: nil, Labels: nil, - Annotations: map[string]string{ - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vc3NsLWhhc2giOiJiMTY2ZjFmNWRmMzhkY2I5MGJlZjExNzBjMWQ4YTkwNiIsInBlcmNvbmEu"..., - }, + Annotations: map[string]string{ + "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFiYzcwNGYwMmU3NDg3"..., + }, OwnerReferences: {{APIVersion: "pxc.percona.com/v1", Kind: "PerconaXtraDBCluster", Name: "monitoring", UID: "229727d6-eec0-4605-a0ad-91ce2629a848", ...}}, Finalizers: nil, - ManagedFields: []v1.ManagedFieldsEntry{ - { - Manager: "percona-xtradb-cluster-operator", - Operation: "Update", - APIVersion: "apps/v1", - Time: s"2026-03-11 09:21:53 +0000 UTC", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:metadata":{"f:annotations":{".":{},"f:percona.com/last-confi`..., - }, - { - Manager: "kube-controller-manager", - Operation: "Update", - APIVersion: "apps/v1", - Time: s"2026-03-11 09:25:19 +0000 UTC", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:status":{"f:availableReplicas":{},"f:collisionCount":{},"f:c`..., - Subresource: "status", - }, - }, + ManagedFields: nil, }, Spec: v1.StatefulSetSpec{ Replicas: &3, Selector: &{MatchLabels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", 
"app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, Template: v1.PodTemplateSpec{ ObjectMeta: v1.ObjectMeta{ ... // 9 identical fields DeletionGracePeriodSeconds: nil, Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: map[string]string{ "kubectl.kubernetes.io/default-container": "pxc", "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e", + "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0", "percona.com/ssl-hash": "b166f1f5df38dcb90bef1170c1d8a906", "percona.com/ssl-internal-hash": "6b13f686eee134eb1acf1a21d6213501", }, OwnerReferences: nil, Finalizers: nil, ManagedFields: nil, }, Spec: v1.PodSpec{ Volumes: []v1.Volume{ {Name: "tmp", VolumeSource: {EmptyDir: &{}}}, { Name: "config", VolumeSource: v1.VolumeSource{ ... // 16 identical fields FC: nil, AzureFile: nil, ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: {Name: "monitoring-pxc"}, Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, VsphereVolume: nil, Quobyte: nil, ... // 9 identical fields }, }, { Name: "ssl-internal", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "monitoring-ssl-internal", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "ssl", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "some-name-ssl", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &false, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "auto-config", VolumeSource: v1.VolumeSource{ ... // 16 identical fields FC: nil, AzureFile: nil, ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: {Name: "auto-monitoring-pxc"}, Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, VsphereVolume: nil, Quobyte: nil, ... // 9 identical fields }, }, { Name: "vault-keyring-secret", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "monitoring-vault", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "mysql-users-secret-file", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "internal-monitoring", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &false, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "mysql-init-file", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "monitoring-mysql-init", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, }, InitContainers: []v1.Container{ { ... // 16 identical fields StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... 
// 3 identical fields }, }, Containers: []v1.Container{ { ... // 3 identical fields Args: nil, WorkingDir: "", Ports: []v1.ContainerPort{ { Name: "", HostPort: 0, ContainerPort: 7777, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30100, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30101, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30102, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30103, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30104, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30105, - Protocol: "TCP", + Protocol: "", HostIP: "", }, }, EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}}, Env: []v1.EnvVar{ { Name: "POD_NAME", Value: "", ValueFrom: &v1.EnvVarSource{ FieldRef: &v1.ObjectFieldSelector{ - APIVersion: "v1", + APIVersion: "", FieldPath: "metadata.name", }, ResourceFieldRef: nil, ConfigMapKeyRef: nil, ... // 2 identical fields }, }, { Name: "POD_NAMESPACE", Value: "", ValueFrom: &v1.EnvVarSource{ FieldRef: &v1.ObjectFieldSelector{ - APIVersion: "v1", + APIVersion: "", FieldPath: "metadata.namespace", }, ResourceFieldRef: nil, ConfigMapKeyRef: nil, ... // 2 identical fields }, }, {Name: "PMM_AGENT_SERVER_ADDRESS", Value: "monitoring-service"}, {Name: "PMM_AGENT_SERVER_USERNAME", Value: "service_token"}, ... // 18 identical elements {Name: "DB_HOST", Value: "localhost"}, {Name: "CLUSTER_NAME", Value: "foo-custom-cluster-name"}, { Name: "PMM_AGENT_SETUP_NODE_NAME", - Value: "$(POD_NAMESPACE)-$(POD_NAME)", + Value: "$(PMM_PREFIX)$(POD_NAMESPACE)-$(POD_NAME)", ValueFrom: nil, }, {Name: "DB_PORT", Value: "33062"}, {Name: "DB_TYPE", Value: "mysql"}, ... // 2 identical elements }, Resources: {Limits: {s"cpu": {i: {...}, Format: "DecimalSI"}, s"memory": {i: {...}, Format: "DecimalSI"}}, Requests: {s"cpu": {i: {...}, s: "308m", Format: "DecimalSI"}, s"memory": {i: {...}, s: "508M", Format: "DecimalSI"}}}, ResizePolicy: nil, ... // 2 identical fields VolumeMounts: {{Name: "datadir", MountPath: "/var/lib/mysql"}}, VolumeDevices: nil, LivenessProbe: &v1.Probe{ ProbeHandler: v1.ProbeHandler{ Exec: nil, HTTPGet: &v1.HTTPGetAction{ Path: "/local/Status", Port: {IntVal: 7777}, Host: "", - Scheme: "HTTP", + Scheme: "", HTTPHeaders: nil, }, TCPSocket: nil, GRPC: nil, }, InitialDelaySeconds: 300, TimeoutSeconds: 5, ... // 4 identical fields }, ReadinessProbe: &v1.Probe{ ProbeHandler: v1.ProbeHandler{ Exec: nil, HTTPGet: &v1.HTTPGetAction{ Path: "/local/Status", Port: {IntVal: 7777}, Host: "", - Scheme: "HTTP", + Scheme: "", HTTPHeaders: nil, }, TCPSocket: nil, GRPC: nil, }, InitialDelaySeconds: 15, TimeoutSeconds: 15, ... // 4 identical fields }, StartupProbe: nil, Lifecycle: &{PreStop: &{Exec: &{Command: {"bash", "-c", "pmm-admin unregister --force"}}}}, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, { ... 
// 3 identical fields Args: {"mysqld"}, WorkingDir: "", Ports: []v1.ContainerPort{ { Name: "mysql", HostPort: 0, ContainerPort: 3306, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "sst", HostPort: 0, ContainerPort: 4444, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "write-set", HostPort: 0, ContainerPort: 4567, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "ist", HostPort: 0, ContainerPort: 4568, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysql-admin", HostPort: 0, ContainerPort: 33062, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysqlx", HostPort: 0, ContainerPort: 33060, - Protocol: "TCP", + Protocol: "", HostIP: "", }, }, EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}}, Env: {{Name: "PXC_SERVICE", Value: "monitoring-pxc-unready"}, {Name: "MONITOR_HOST", Value: "%"}, {Name: "MYSQL_ROOT_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-monitoring"}, Key: "root"}}}, {Name: "XTRABACKUP_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-monitoring"}, Key: "xtrabackup"}}}, ...}, ... // 4 identical fields VolumeMounts: {{Name: "datadir", MountPath: "/var/lib/mysql"}, {Name: "config", MountPath: "/etc/percona-xtradb-cluster.conf.d"}, {Name: "tmp", MountPath: "/tmp"}, {Name: "ssl", MountPath: "/etc/mysql/ssl"}, ...}, VolumeDevices: nil, LivenessProbe: &v1.Probe{ ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/liveness-check.sh"}}}, InitialDelaySeconds: 300, TimeoutSeconds: 5, - PeriodSeconds: 10, + PeriodSeconds: 0, SuccessThreshold: 1, FailureThreshold: 3, TerminationGracePeriodSeconds: nil, }, ReadinessProbe: &{ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/readiness-check.sh"}}}, InitialDelaySeconds: 15, TimeoutSeconds: 15, PeriodSeconds: 30, ...}, StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, }, EphemeralContainers: nil, - RestartPolicy: "Always", + RestartPolicy: "", TerminationGracePeriodSeconds: &600, ActiveDeadlineSeconds: nil, - DNSPolicy: "ClusterFirst", + DNSPolicy: "", NodeSelector: nil, ServiceAccountName: "default", - DeprecatedServiceAccount: "default", + DeprecatedServiceAccount: "", AutomountServiceAccountToken: nil, NodeName: "", ... // 7 identical fields Subdomain: "", Affinity: nil, - SchedulerName: "default-scheduler", + SchedulerName: "", Tolerations: nil, HostAliases: nil, ... // 6 identical fields PreemptionPolicy: nil, Overhead: nil, - TopologySpreadConstraints: nil, + TopologySpreadConstraints: []v1.TopologySpreadConstraint{}, SetHostnameAsFQDN: nil, OS: nil, ... // 6 identical fields }, }, VolumeClaimTemplates: []v1.PersistentVolumeClaim{ { TypeMeta: {}, ObjectMeta: {Name: "datadir", Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, Spec: v1.PersistentVolumeClaimSpec{ ... // 3 identical fields VolumeName: "", StorageClassName: nil, - VolumeMode: &"Filesystem", + VolumeMode: nil, DataSource: nil, DataSourceRef: nil, VolumeAttributesClassName: nil, }, Status: v1.PersistentVolumeClaimStatus{ - Phase: "Pending", + Phase: "", AccessModes: nil, Capacity: nil, ... 
// 5 identical fields }, }, }, ServiceName: "monitoring-pxc", - PodManagementPolicy: "OrderedReady", + PodManagementPolicy: "", UpdateStrategy: {Type: "RollingUpdate", RollingUpdate: &{Partition: &0}}, - RevisionHistoryLimit: &10, + RevisionHistoryLimit: nil, MinReadySeconds: 0, - PersistentVolumeClaimRetentionPolicy: s"&StatefulSetPersistentVolumeClaimRetentionPolicy{WhenDeleted:Retain,WhenScaled:Retain,}", + PersistentVolumeClaimRetentionPolicy: nil, Ordinals: nil, }, Status: v1.StatefulSetStatus{ - ObservedGeneration: 1, + ObservedGeneration: 0, - Replicas: 3, + Replicas: 0, - ReadyReplicas: 3, + ReadyReplicas: 0, - CurrentReplicas: 3, + CurrentReplicas: 0, - UpdatedReplicas: 3, + UpdatedReplicas: 0, - CurrentRevision: "monitoring-pxc-8597498866", + CurrentRevision: "", - UpdateRevision: "monitoring-pxc-8597498866", + UpdateRevision: "", - CollisionCount: &0, + CollisionCount: nil, Conditions: nil, - AvailableReplicas: 3, + AvailableReplicas: 0, }, } 2026-03-11T09:28:55.767Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "20ac3db6-6f4e-4bd7-824e-fd617bee125e", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} &v1.StatefulSet{ TypeMeta: {Kind: "StatefulSet", APIVersion: "apps/v1"}, ObjectMeta: v1.ObjectMeta{ ... // 2 identical fields Namespace: "monitoring-pmm3-7039", SelfLink: "", - UID: "9e03366e-d509-451f-a48a-e41c633d4416", + UID: "", - ResourceVersion: "1773220991270127009", + ResourceVersion: "", - Generation: 1, + Generation: 0, - CreationTimestamp: v1.Time{Time: s"2026-03-11 09:21:53 +0000 UTC"}, + CreationTimestamp: v1.Time{}, DeletionTimestamp: nil, DeletionGracePeriodSeconds: nil, Labels: nil, - Annotations: map[string]string{ - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIn19LCJzcGVjIjp7InZvbHVtZXMiOlt7Im5hbWUiOiJoYXByb3h5LWN1c3RvbSIsImNvbmZpZ01h"..., - }, + Annotations: map[string]string{ + "percona.com/last-config-hash": 
"eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFi"..., + }, OwnerReferences: {{APIVersion: "pxc.percona.com/v1", Kind: "PerconaXtraDBCluster", Name: "monitoring", UID: "229727d6-eec0-4605-a0ad-91ce2629a848", ...}}, Finalizers: nil, - ManagedFields: []v1.ManagedFieldsEntry{ - { - Manager: "percona-xtradb-cluster-operator", - Operation: "Update", - APIVersion: "apps/v1", - Time: s"2026-03-11 09:21:53 +0000 UTC", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:metadata":{"f:annotations":{".":{},"f:percona.com/last-confi`..., - }, - { - Manager: "kube-controller-manager", - Operation: "Update", - APIVersion: "apps/v1", - Time: s"2026-03-11 09:23:11 +0000 UTC", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:status":{"f:availableReplicas":{},"f:collisionCount":{},"f:c`..., - Subresource: "status", - }, - }, + ManagedFields: nil, }, Spec: v1.StatefulSetSpec{ Replicas: &2, Selector: &{MatchLabels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, Template: v1.PodTemplateSpec{ ObjectMeta: v1.ObjectMeta{ ... // 9 identical fields DeletionGracePeriodSeconds: nil, Labels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: map[string]string{ "kubectl.kubernetes.io/default-container": "haproxy", "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e", + "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0", }, OwnerReferences: nil, Finalizers: nil, ManagedFields: nil, }, Spec: v1.PodSpec{ Volumes: []v1.Volume{ { Name: "haproxy-custom", VolumeSource: v1.VolumeSource{ ... // 16 identical fields FC: nil, AzureFile: nil, ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: {Name: "monitoring-haproxy"}, Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, VsphereVolume: nil, Quobyte: nil, ... // 9 identical fields }, }, {Name: "haproxy-auto", VolumeSource: {EmptyDir: &{}}}, { Name: "mysql-users-secret-file", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "internal-monitoring", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &false, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "my-env-var-secrets", VolumeSource: v1.VolumeSource{ ... 
// 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "my-env-var-secrets", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, {Name: "bin", VolumeSource: {EmptyDir: &{}}}, }, InitContainers: []v1.Container{ { ... // 16 identical fields StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, { ... // 16 identical fields StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, }, Containers: []v1.Container{ { ... // 3 identical fields Args: nil, WorkingDir: "", Ports: []v1.ContainerPort{ { Name: "", HostPort: 0, ContainerPort: 7777, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30100, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30101, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30102, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30103, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30104, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30105, - Protocol: "TCP", + Protocol: "", HostIP: "", }, }, EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}}, Env: []v1.EnvVar{ { Name: "POD_NAME", Value: "", ValueFrom: &v1.EnvVarSource{ FieldRef: &v1.ObjectFieldSelector{ - APIVersion: "v1", + APIVersion: "", FieldPath: "metadata.name", }, ResourceFieldRef: nil, ConfigMapKeyRef: nil, ... // 2 identical fields }, }, { Name: "POD_NAMESPACE", Value: "", ValueFrom: &v1.EnvVarSource{ FieldRef: &v1.ObjectFieldSelector{ - APIVersion: "v1", + APIVersion: "", FieldPath: "metadata.namespace", }, ResourceFieldRef: nil, ConfigMapKeyRef: nil, ... // 2 identical fields }, }, {Name: "PMM_AGENT_SERVER_ADDRESS", Value: "monitoring-service"}, {Name: "PMM_AGENT_SERVER_USERNAME", Value: "service_token"}, ... // 18 identical elements {Name: "DB_HOST", Value: "localhost"}, {Name: "CLUSTER_NAME", Value: "foo-custom-cluster-name"}, { Name: "PMM_AGENT_SETUP_NODE_NAME", - Value: "$(POD_NAMESPACE)-$(POD_NAME)", + Value: "$(PMM_PREFIX)$(POD_NAMESPACE)-$(POD_NAME)", ValueFrom: nil, }, {Name: "DB_TYPE", Value: "haproxy"}, {Name: "MONITOR_USER", Value: "monitor"}, ... // 3 identical elements }, Resources: {Limits: {s"cpu": {i: {...}, Format: "DecimalSI"}, s"memory": {i: {...}, Format: "DecimalSI"}}, Requests: {s"cpu": {i: {...}, s: "308m", Format: "DecimalSI"}, s"memory": {i: {...}, s: "508M", Format: "DecimalSI"}}}, ResizePolicy: nil, ... // 2 identical fields VolumeMounts: {{Name: "bin", MountPath: "/var/lib/mysql"}}, VolumeDevices: nil, LivenessProbe: &v1.Probe{ ProbeHandler: v1.ProbeHandler{ Exec: nil, HTTPGet: &v1.HTTPGetAction{ Path: "/local/Status", Port: {IntVal: 7777}, Host: "", - Scheme: "HTTP", + Scheme: "", HTTPHeaders: nil, }, TCPSocket: nil, GRPC: nil, }, InitialDelaySeconds: 300, TimeoutSeconds: 5, ... 
// 4 identical fields }, ReadinessProbe: &v1.Probe{ ProbeHandler: v1.ProbeHandler{ Exec: nil, HTTPGet: &v1.HTTPGetAction{ Path: "/local/Status", Port: {IntVal: 7777}, Host: "", - Scheme: "HTTP", + Scheme: "", HTTPHeaders: nil, }, TCPSocket: nil, GRPC: nil, }, InitialDelaySeconds: 15, TimeoutSeconds: 15, ... // 4 identical fields }, StartupProbe: nil, Lifecycle: &{PreStop: &{Exec: &{Command: {"bash", "-c", "pmm-admin unregister --force"}}}}, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, { ... // 3 identical fields Args: {"haproxy"}, WorkingDir: "", Ports: []v1.ContainerPort{ { Name: "mysql", HostPort: 0, ContainerPort: 3306, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysql-replicas", HostPort: 0, ContainerPort: 3307, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "proxy-protocol", HostPort: 0, ContainerPort: 3309, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysql-admin", HostPort: 0, ContainerPort: 33062, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysqlx", HostPort: 0, ContainerPort: 33060, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "stats", HostPort: 0, ContainerPort: 8404, - Protocol: "TCP", + Protocol: "", HostIP: "", }, }, EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}}, Env: {{Name: "PXC_SERVICE", Value: "monitoring-pxc"}, {Name: "LIVENESS_CHECK_TIMEOUT", Value: "5"}, {Name: "READINESS_CHECK_TIMEOUT", Value: "1"}}, ... // 8 identical fields StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, { ... // 16 identical fields StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, }, EphemeralContainers: nil, - RestartPolicy: "Always", + RestartPolicy: "", TerminationGracePeriodSeconds: &30, ActiveDeadlineSeconds: nil, - DNSPolicy: "ClusterFirst", + DNSPolicy: "", NodeSelector: nil, ServiceAccountName: "default", - DeprecatedServiceAccount: "default", + DeprecatedServiceAccount: "", AutomountServiceAccountToken: nil, NodeName: "", ... // 2 identical fields HostIPC: false, ShareProcessNamespace: nil, - SecurityContext: s"&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,AppArmorProfile:nil,SupplementalGroupsPolicy:nil,SELinux"..., + SecurityContext: nil, ImagePullSecrets: nil, Hostname: "", Subdomain: "", Affinity: nil, - SchedulerName: "default-scheduler", + SchedulerName: "", Tolerations: nil, HostAliases: nil, ... // 6 identical fields PreemptionPolicy: nil, Overhead: nil, - TopologySpreadConstraints: nil, + TopologySpreadConstraints: []v1.TopologySpreadConstraint{}, SetHostnameAsFQDN: nil, OS: nil, ... 
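// NOTE (editorial gloss, not operator output): in the go-cmp diff above and
// below, the "-" lines appear to come from the live StatefulSet and the "+"
// lines from the operator's freshly built spec, so pairs like
// Protocol: "TCP" -> "", DefaultMode: &420 -> nil and the zeroed Status are
// presumably API-server defaults that the desired object never sets. The
// substantive drift seems to be the added percona.com/env-secret-config-hash
// annotation and the PMM_AGENT_SETUP_NODE_NAME value, consistent with the
// "hashChanged": true / "metaChanged": true flags logged with these updates.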
// 6 identical fields }, }, VolumeClaimTemplates: nil, ServiceName: "monitoring-haproxy", - PodManagementPolicy: "OrderedReady", + PodManagementPolicy: "", UpdateStrategy: {Type: "RollingUpdate", RollingUpdate: &{Partition: &0}}, - RevisionHistoryLimit: &10, + RevisionHistoryLimit: nil, MinReadySeconds: 0, - PersistentVolumeClaimRetentionPolicy: s"&StatefulSetPersistentVolumeClaimRetentionPolicy{WhenDeleted:Retain,WhenScaled:Retain,}", + PersistentVolumeClaimRetentionPolicy: nil, Ordinals: nil, }, Status: v1.StatefulSetStatus{ - ObservedGeneration: 1, + ObservedGeneration: 0, - Replicas: 2, + Replicas: 0, - ReadyReplicas: 2, + ReadyReplicas: 0, - CurrentReplicas: 2, + CurrentReplicas: 0, - UpdatedReplicas: 2, + UpdatedReplicas: 0, - CurrentRevision: "monitoring-haproxy-7bd9d8dfc6", + CurrentRevision: "", - UpdateRevision: "monitoring-haproxy-7bd9d8dfc6", + UpdateRevision: "", - CollisionCount: &0, + CollisionCount: nil, Conditions: nil, - AvailableReplicas: 2, + AvailableReplicas: 0, }, } 2026-03-11T09:28:55.848Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "20ac3db6-6f4e-4bd7-824e-fd617bee125e", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} &v1.StatefulSet{ TypeMeta: {Kind: "StatefulSet", APIVersion: "apps/v1"}, ObjectMeta: v1.ObjectMeta{ ... // 2 identical fields Namespace: "monitoring-pmm3-7039", SelfLink: "", - UID: "9e03366e-d509-451f-a48a-e41c633d4416", + UID: "", - ResourceVersion: "1773220991270127009", + ResourceVersion: "", - Generation: 1, + Generation: 0, - CreationTimestamp: v1.Time{Time: s"2026-03-11 09:21:53 +0000 UTC"}, + CreationTimestamp: v1.Time{}, DeletionTimestamp: nil, DeletionGracePeriodSeconds: nil, Labels: nil, - Annotations: map[string]string{ - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIn19LCJzcGVjIjp7InZvbHVtZXMiOlt7Im5hbWUiOiJoYXByb3h5LWN1c3RvbSIsImNvbmZpZ01h"..., - }, + Annotations: map[string]string{ + "percona.com/last-config-hash": 
"eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFi"..., + }, OwnerReferences: {{APIVersion: "pxc.percona.com/v1", Kind: "PerconaXtraDBCluster", Name: "monitoring", UID: "229727d6-eec0-4605-a0ad-91ce2629a848", ...}}, Finalizers: nil, - ManagedFields: []v1.ManagedFieldsEntry{ - { - Manager: "percona-xtradb-cluster-operator", - Operation: "Update", - APIVersion: "apps/v1", - Time: s"2026-03-11 09:21:53 +0000 UTC", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:metadata":{"f:annotations":{".":{},"f:percona.com/last-confi`..., - }, - { - Manager: "kube-controller-manager", - Operation: "Update", - APIVersion: "apps/v1", - Time: s"2026-03-11 09:23:11 +0000 UTC", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:status":{"f:availableReplicas":{},"f:collisionCount":{},"f:c`..., - Subresource: "status", - }, - }, + ManagedFields: nil, }, Spec: v1.StatefulSetSpec{ Replicas: &2, Selector: &{MatchLabels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, Template: v1.PodTemplateSpec{ ObjectMeta: v1.ObjectMeta{ ... // 9 identical fields DeletionGracePeriodSeconds: nil, Labels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: map[string]string{ "kubectl.kubernetes.io/default-container": "haproxy", "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e", + "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0", }, OwnerReferences: nil, Finalizers: nil, ManagedFields: nil, }, Spec: v1.PodSpec{ Volumes: []v1.Volume{ { Name: "haproxy-custom", VolumeSource: v1.VolumeSource{ ... // 16 identical fields FC: nil, AzureFile: nil, ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: {Name: "monitoring-haproxy"}, Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, VsphereVolume: nil, Quobyte: nil, ... // 9 identical fields }, }, {Name: "haproxy-auto", VolumeSource: {EmptyDir: &{}}}, { Name: "mysql-users-secret-file", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "internal-monitoring", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &false, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "my-env-var-secrets", VolumeSource: v1.VolumeSource{ ... 
// 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "my-env-var-secrets", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, {Name: "bin", VolumeSource: {EmptyDir: &{}}}, }, InitContainers: []v1.Container{ { ... // 16 identical fields StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, { ... // 16 identical fields StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, }, Containers: []v1.Container{ { ... // 3 identical fields Args: nil, WorkingDir: "", Ports: []v1.ContainerPort{ { Name: "", HostPort: 0, ContainerPort: 7777, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30100, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30101, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30102, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30103, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30104, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30105, - Protocol: "TCP", + Protocol: "", HostIP: "", }, }, EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}}, Env: []v1.EnvVar{ { Name: "POD_NAME", Value: "", ValueFrom: &v1.EnvVarSource{ FieldRef: &v1.ObjectFieldSelector{ - APIVersion: "v1", + APIVersion: "", FieldPath: "metadata.name", }, ResourceFieldRef: nil, ConfigMapKeyRef: nil, ... // 2 identical fields }, }, { Name: "POD_NAMESPACE", Value: "", ValueFrom: &v1.EnvVarSource{ FieldRef: &v1.ObjectFieldSelector{ - APIVersion: "v1", + APIVersion: "", FieldPath: "metadata.namespace", }, ResourceFieldRef: nil, ConfigMapKeyRef: nil, ... // 2 identical fields }, }, {Name: "PMM_AGENT_SERVER_ADDRESS", Value: "monitoring-service"}, {Name: "PMM_AGENT_SERVER_USERNAME", Value: "service_token"}, ... // 18 identical elements {Name: "DB_HOST", Value: "localhost"}, {Name: "CLUSTER_NAME", Value: "foo-custom-cluster-name"}, { Name: "PMM_AGENT_SETUP_NODE_NAME", - Value: "$(POD_NAMESPACE)-$(POD_NAME)", + Value: "$(PMM_PREFIX)$(POD_NAMESPACE)-$(POD_NAME)", ValueFrom: nil, }, {Name: "DB_TYPE", Value: "haproxy"}, {Name: "MONITOR_USER", Value: "monitor"}, ... // 3 identical elements }, Resources: {Limits: {s"cpu": {i: {...}, Format: "DecimalSI"}, s"memory": {i: {...}, Format: "DecimalSI"}}, Requests: {s"cpu": {i: {...}, s: "308m", Format: "DecimalSI"}, s"memory": {i: {...}, s: "508M", Format: "DecimalSI"}}}, ResizePolicy: nil, ... // 2 identical fields VolumeMounts: {{Name: "bin", MountPath: "/var/lib/mysql"}}, VolumeDevices: nil, LivenessProbe: &v1.Probe{ ProbeHandler: v1.ProbeHandler{ Exec: nil, HTTPGet: &v1.HTTPGetAction{ Path: "/local/Status", Port: {IntVal: 7777}, Host: "", - Scheme: "HTTP", + Scheme: "", HTTPHeaders: nil, }, TCPSocket: nil, GRPC: nil, }, InitialDelaySeconds: 300, TimeoutSeconds: 5, ... 
// 4 identical fields }, ReadinessProbe: &v1.Probe{ ProbeHandler: v1.ProbeHandler{ Exec: nil, HTTPGet: &v1.HTTPGetAction{ Path: "/local/Status", Port: {IntVal: 7777}, Host: "", - Scheme: "HTTP", + Scheme: "", HTTPHeaders: nil, }, TCPSocket: nil, GRPC: nil, }, InitialDelaySeconds: 15, TimeoutSeconds: 15, ... // 4 identical fields }, StartupProbe: nil, Lifecycle: &{PreStop: &{Exec: &{Command: {"bash", "-c", "pmm-admin unregister --force"}}}}, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, { ... // 3 identical fields Args: {"haproxy"}, WorkingDir: "", Ports: []v1.ContainerPort{ { Name: "mysql", HostPort: 0, ContainerPort: 3306, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysql-replicas", HostPort: 0, ContainerPort: 3307, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "proxy-protocol", HostPort: 0, ContainerPort: 3309, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysql-admin", HostPort: 0, ContainerPort: 33062, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysqlx", HostPort: 0, ContainerPort: 33060, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "stats", HostPort: 0, ContainerPort: 8404, - Protocol: "TCP", + Protocol: "", HostIP: "", }, }, EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}}, Env: {{Name: "PXC_SERVICE", Value: "monitoring-pxc"}, {Name: "LIVENESS_CHECK_TIMEOUT", Value: "5"}, {Name: "READINESS_CHECK_TIMEOUT", Value: "1"}}, ... // 8 identical fields StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, { ... // 16 identical fields StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, }, EphemeralContainers: nil, - RestartPolicy: "Always", + RestartPolicy: "", TerminationGracePeriodSeconds: &30, ActiveDeadlineSeconds: nil, - DNSPolicy: "ClusterFirst", + DNSPolicy: "", NodeSelector: nil, ServiceAccountName: "default", - DeprecatedServiceAccount: "default", + DeprecatedServiceAccount: "", AutomountServiceAccountToken: nil, NodeName: "", ... // 2 identical fields HostIPC: false, ShareProcessNamespace: nil, - SecurityContext: s"&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,AppArmorProfile:nil,SupplementalGroupsPolicy:nil,SELinux"..., + SecurityContext: nil, ImagePullSecrets: nil, Hostname: "", Subdomain: "", Affinity: nil, - SchedulerName: "default-scheduler", + SchedulerName: "", Tolerations: nil, HostAliases: nil, ... // 6 identical fields PreemptionPolicy: nil, Overhead: nil, - TopologySpreadConstraints: nil, + TopologySpreadConstraints: []v1.TopologySpreadConstraint{}, SetHostnameAsFQDN: nil, OS: nil, ... 
// 6 identical fields }, }, VolumeClaimTemplates: nil, ServiceName: "monitoring-haproxy", - PodManagementPolicy: "OrderedReady", + PodManagementPolicy: "", UpdateStrategy: {Type: "RollingUpdate", RollingUpdate: &{Partition: &0}}, - RevisionHistoryLimit: &10, + RevisionHistoryLimit: nil, MinReadySeconds: 0, - PersistentVolumeClaimRetentionPolicy: s"&StatefulSetPersistentVolumeClaimRetentionPolicy{WhenDeleted:Retain,WhenScaled:Retain,}", + PersistentVolumeClaimRetentionPolicy: nil, Ordinals: nil, }, Status: v1.StatefulSetStatus{ - ObservedGeneration: 1, + ObservedGeneration: 0, - Replicas: 2, + Replicas: 0, - ReadyReplicas: 2, + ReadyReplicas: 0, - CurrentReplicas: 2, + CurrentReplicas: 0, - UpdatedReplicas: 2, + UpdatedReplicas: 0, - CurrentRevision: "monitoring-haproxy-7bd9d8dfc6", + CurrentRevision: "", - UpdateRevision: "monitoring-haproxy-7bd9d8dfc6", + UpdateRevision: "", - CollisionCount: &0, + CollisionCount: nil, Conditions: nil, - AvailableReplicas: 2, + AvailableReplicas: 0, }, }
2026-03-11T09:28:56.156Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "20ac3db6-6f4e-4bd7-824e-fd617bee125e", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-2: dial tcp: lookup monitoring-pxc-2.monitoring-pxc.monitoring-pmm3-7039 on 34.118.224.10:53: no such host"}
2026-03-11T09:30:43.930Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "41aad2e0-a488-4fd1-9770-02addffd885b", "err": "failed to connect to pod monitoring-pxc-0: dial tcp: lookup monitoring-pxc-0.monitoring-pxc.monitoring-pmm3-7039 on 34.118.224.10:53: no such host"}
2026-03-11T09:30:44.187Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "8533b501-538c-4400-9a7a-9adacaa055f7", "err": "failed to connect to pod monitoring-pxc-0: dial tcp: lookup monitoring-pxc-0.monitoring-pxc.monitoring-pmm3-7039 on 34.118.224.10:53: no such host"}
2026-03-11T09:30:50.011Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "434b469e-4083-4e2c-9791-c6ba0c976cb5", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-0: dial tcp: lookup monitoring-pxc-0.monitoring-pxc.monitoring-pmm3-7039 on 34.118.224.10:53: no such host"}
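NOTE (editorial gloss, not part of the log): the "no such host" errors above are the reconciler resolving per-pod DNS names of the form <pod>.<headless-service>.<namespace> while PXC pods are being recreated; until a pod is ready again, its A record is absent from the headless service, so the lookup fails rather than the connection. A minimal sketch of that lookup in Go (host name taken from the log, everything else assumed):

package main

import (
	"fmt"
	"net"
)

func main() {
	// Per-pod address behind a headless service: <pod>.<service>.<namespace>.
	host := "monitoring-pxc-0.monitoring-pxc.monitoring-pmm3-7039"
	if _, err := net.LookupHost(host); err != nil {
		// During a rolling restart this returns "no such host",
		// which is exactly what the reconciler reports above.
		fmt.Println("lookup failed:", err)
	}
}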
"pmmservertoken"} 2026-03-11T09:31:37.022Z INFO Internal secrets updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "5f8c5ca8-6d0c-47f0-a864-7443c28e759f", "user": "pmmservertoken"} 2026-03-11T09:31:37.022Z INFO PXC pods will be restarted {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "5f8c5ca8-6d0c-47f0-a864-7443c28e759f", "last-applied-secret": "69e43fde94585f47fd312c03e66a50f688e4b6059c6cdfea2ffb234d2bf0188b"} 2026-03-11T09:31:37.022Z INFO HAProxy pods will be restarted {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "5f8c5ca8-6d0c-47f0-a864-7443c28e759f", "last-applied-secret": "69e43fde94585f47fd312c03e66a50f688e4b6059c6cdfea2ffb234d2bf0188b"} 2026-03-11T09:31:37.024Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "5f8c5ca8-6d0c-47f0-a864-7443c28e759f", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} &v1.StatefulSet{ TypeMeta: {Kind: "StatefulSet", APIVersion: "apps/v1"}, ObjectMeta: v1.ObjectMeta{ ... 
// 2 identical fields Namespace: "monitoring-pmm3-7039", SelfLink: "", - UID: "aed0468f-2ce6-4057-91bb-826d632d16b7", + UID: "", - ResourceVersion: "1773221493948527005", + ResourceVersion: "", - Generation: 2, + Generation: 0, - CreationTimestamp: v1.Time{Time: s"2026-03-11 09:21:53 +0000 UTC"}, + CreationTimestamp: v1.Time{}, DeletionTimestamp: nil, DeletionGracePeriodSeconds: nil, Labels: nil, - Annotations: map[string]string{ - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFiYzcwNGYwMmU3NDg3"..., - }, + Annotations: map[string]string{ + "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsImxhc3QtYXBwbGllZC1zZWNyZXQiOiI2OWU0M2ZkZTk0NTg1ZjQ3ZmQzMTJjMDNlNjZhNTBmNjg4ZTRiNjA1OWM2Y2RmZWEyZmZiMjM0ZDJiZjAxODhiIiwicGVyY29uYS5jb20vY29uZmlndXJhdGlvbi1oYXNoIjoiZDQxZDhjZDk4ZjAw"..., + }, OwnerReferences: {{APIVersion: "pxc.percona.com/v1", Kind: "PerconaXtraDBCluster", Name: "monitoring", UID: "229727d6-eec0-4605-a0ad-91ce2629a848", ...}}, Finalizers: nil, - ManagedFields: []v1.ManagedFieldsEntry{ - { - Manager: "percona-xtradb-cluster-operator", - Operation: "Update", - APIVersion: "apps/v1", - Time: s"2026-03-11 09:28:55 +0000 UTC", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:metadata":{"f:annotations":{".":{},"f:percona.com/last-confi`..., - }, - { - Manager: "kube-controller-manager", - Operation: "Update", - APIVersion: "apps/v1", - Time: s"2026-03-11 09:31:33 +0000 UTC", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:status":{"f:availableReplicas":{},"f:collisionCount":{},"f:c`..., - Subresource: "status", - }, - }, + ManagedFields: nil, }, Spec: v1.StatefulSetSpec{ Replicas: &3, Selector: &{MatchLabels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", 
"app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, Template: v1.PodTemplateSpec{ ObjectMeta: v1.ObjectMeta{ ... // 9 identical fields DeletionGracePeriodSeconds: nil, Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: map[string]string{ "kubectl.kubernetes.io/default-container": "pxc", + "last-applied-secret": "69e43fde94585f47fd312c03e66a50f688e4b6059c6cdfea2ffb234d2bf0188b", "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e", "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0", ... // 2 identical entries }, OwnerReferences: nil, Finalizers: nil, ManagedFields: nil, }, Spec: v1.PodSpec{ Volumes: []v1.Volume{ {Name: "tmp", VolumeSource: {EmptyDir: &{}}}, { Name: "config", VolumeSource: v1.VolumeSource{ ... // 16 identical fields FC: nil, AzureFile: nil, ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: {Name: "monitoring-pxc"}, Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, VsphereVolume: nil, Quobyte: nil, ... // 9 identical fields }, }, { Name: "ssl-internal", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "monitoring-ssl-internal", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "ssl", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "some-name-ssl", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &false, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "auto-config", VolumeSource: v1.VolumeSource{ ... // 16 identical fields FC: nil, AzureFile: nil, ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: {Name: "auto-monitoring-pxc"}, Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, VsphereVolume: nil, Quobyte: nil, ... // 9 identical fields }, }, { Name: "vault-keyring-secret", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "monitoring-vault", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "mysql-users-secret-file", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "internal-monitoring", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &false, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "mysql-init-file", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "monitoring-mysql-init", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, }, InitContainers: []v1.Container{ { ... // 16 identical fields StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... 
// 3 identical fields }, }, Containers: []v1.Container{ { ... // 3 identical fields Args: nil, WorkingDir: "", Ports: []v1.ContainerPort{ { Name: "", HostPort: 0, ContainerPort: 7777, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30100, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30101, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30102, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30103, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30104, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30105, - Protocol: "TCP", + Protocol: "", HostIP: "", }, }, EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}}, Env: []v1.EnvVar{ { Name: "POD_NAME", Value: "", ValueFrom: &v1.EnvVarSource{ FieldRef: &v1.ObjectFieldSelector{ - APIVersion: "v1", + APIVersion: "", FieldPath: "metadata.name", }, ResourceFieldRef: nil, ConfigMapKeyRef: nil, ... // 2 identical fields }, }, { Name: "POD_NAMESPACE", Value: "", ValueFrom: &v1.EnvVarSource{ FieldRef: &v1.ObjectFieldSelector{ - APIVersion: "v1", + APIVersion: "", FieldPath: "metadata.namespace", }, ResourceFieldRef: nil, ConfigMapKeyRef: nil, ... // 2 identical fields }, }, {Name: "PMM_AGENT_SERVER_ADDRESS", Value: "monitoring-service"}, {Name: "PMM_AGENT_SERVER_USERNAME", Value: "service_token"}, ... // 25 identical elements }, Resources: {Limits: {s"cpu": {i: {...}, Format: "DecimalSI"}, s"memory": {i: {...}, Format: "DecimalSI"}}, Requests: {s"cpu": {i: {...}, s: "308m", Format: "DecimalSI"}, s"memory": {i: {...}, s: "508M", Format: "DecimalSI"}}}, ResizePolicy: nil, ... // 2 identical fields VolumeMounts: {{Name: "datadir", MountPath: "/var/lib/mysql"}}, VolumeDevices: nil, LivenessProbe: &v1.Probe{ ProbeHandler: v1.ProbeHandler{ Exec: nil, HTTPGet: &v1.HTTPGetAction{ Path: "/local/Status", Port: {IntVal: 7777}, Host: "", - Scheme: "HTTP", + Scheme: "", HTTPHeaders: nil, }, TCPSocket: nil, GRPC: nil, }, InitialDelaySeconds: 300, TimeoutSeconds: 5, ... // 4 identical fields }, ReadinessProbe: &v1.Probe{ ProbeHandler: v1.ProbeHandler{ Exec: nil, HTTPGet: &v1.HTTPGetAction{ Path: "/local/Status", Port: {IntVal: 7777}, Host: "", - Scheme: "HTTP", + Scheme: "", HTTPHeaders: nil, }, TCPSocket: nil, GRPC: nil, }, InitialDelaySeconds: 15, TimeoutSeconds: 15, ... // 4 identical fields }, StartupProbe: nil, Lifecycle: &{PreStop: &{Exec: &{Command: {"bash", "-c", "pmm-admin unregister --force"}}}}, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, { ... 
// 3 identical fields Args: {"mysqld"}, WorkingDir: "", Ports: []v1.ContainerPort{ { Name: "mysql", HostPort: 0, ContainerPort: 3306, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "sst", HostPort: 0, ContainerPort: 4444, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "write-set", HostPort: 0, ContainerPort: 4567, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "ist", HostPort: 0, ContainerPort: 4568, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysql-admin", HostPort: 0, ContainerPort: 33062, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysqlx", HostPort: 0, ContainerPort: 33060, - Protocol: "TCP", + Protocol: "", HostIP: "", }, }, EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}}, Env: {{Name: "PXC_SERVICE", Value: "monitoring-pxc-unready"}, {Name: "MONITOR_HOST", Value: "%"}, {Name: "MYSQL_ROOT_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-monitoring"}, Key: "root"}}}, {Name: "XTRABACKUP_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-monitoring"}, Key: "xtrabackup"}}}, ...}, ... // 4 identical fields VolumeMounts: {{Name: "datadir", MountPath: "/var/lib/mysql"}, {Name: "config", MountPath: "/etc/percona-xtradb-cluster.conf.d"}, {Name: "tmp", MountPath: "/tmp"}, {Name: "ssl", MountPath: "/etc/mysql/ssl"}, ...}, VolumeDevices: nil, LivenessProbe: &v1.Probe{ ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/liveness-check.sh"}}}, InitialDelaySeconds: 300, TimeoutSeconds: 5, - PeriodSeconds: 10, + PeriodSeconds: 0, SuccessThreshold: 1, FailureThreshold: 3, TerminationGracePeriodSeconds: nil, }, ReadinessProbe: &{ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/readiness-check.sh"}}}, InitialDelaySeconds: 15, TimeoutSeconds: 15, PeriodSeconds: 30, ...}, StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, }, EphemeralContainers: nil, - RestartPolicy: "Always", + RestartPolicy: "", TerminationGracePeriodSeconds: &600, ActiveDeadlineSeconds: nil, - DNSPolicy: "ClusterFirst", + DNSPolicy: "", NodeSelector: nil, ServiceAccountName: "default", - DeprecatedServiceAccount: "default", + DeprecatedServiceAccount: "", AutomountServiceAccountToken: nil, NodeName: "", ... // 7 identical fields Subdomain: "", Affinity: nil, - SchedulerName: "default-scheduler", + SchedulerName: "", Tolerations: nil, HostAliases: nil, ... // 6 identical fields PreemptionPolicy: nil, Overhead: nil, - TopologySpreadConstraints: nil, + TopologySpreadConstraints: []v1.TopologySpreadConstraint{}, SetHostnameAsFQDN: nil, OS: nil, ... // 6 identical fields }, }, VolumeClaimTemplates: []v1.PersistentVolumeClaim{ { TypeMeta: {}, ObjectMeta: {Name: "datadir", Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, Spec: v1.PersistentVolumeClaimSpec{ ... // 3 identical fields VolumeName: "", StorageClassName: nil, - VolumeMode: &"Filesystem", + VolumeMode: nil, DataSource: nil, DataSourceRef: nil, VolumeAttributesClassName: nil, }, Status: v1.PersistentVolumeClaimStatus{ - Phase: "Pending", + Phase: "", AccessModes: nil, Capacity: nil, ... 
// 5 identical fields }, }, }, ServiceName: "monitoring-pxc", - PodManagementPolicy: "OrderedReady", + PodManagementPolicy: "", UpdateStrategy: {Type: "RollingUpdate", RollingUpdate: &{Partition: &0}}, - RevisionHistoryLimit: &10, + RevisionHistoryLimit: nil, MinReadySeconds: 0, - PersistentVolumeClaimRetentionPolicy: s"&StatefulSetPersistentVolumeClaimRetentionPolicy{WhenDeleted:Retain,WhenScaled:Retain,}", + PersistentVolumeClaimRetentionPolicy: nil, Ordinals: nil, }, Status: v1.StatefulSetStatus{ - ObservedGeneration: 2, + ObservedGeneration: 0, - Replicas: 3, + Replicas: 0, - ReadyReplicas: 3, + ReadyReplicas: 0, - CurrentReplicas: 3, + CurrentReplicas: 0, - UpdatedReplicas: 3, + UpdatedReplicas: 0, - CurrentRevision: "monitoring-pxc-674cdf9f88", + CurrentRevision: "", - UpdateRevision: "monitoring-pxc-674cdf9f88", + UpdateRevision: "", - CollisionCount: &0, + CollisionCount: nil, Conditions: nil, - AvailableReplicas: 3, + AvailableReplicas: 0, }, } 2026-03-11T09:31:37.103Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "5f8c5ca8-6d0c-47f0-a864-7443c28e759f", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} &v1.StatefulSet{ TypeMeta: {Kind: "StatefulSet", APIVersion: "apps/v1"}, ObjectMeta: v1.ObjectMeta{ ... // 2 identical fields Namespace: "monitoring-pmm3-7039", SelfLink: "", - UID: "9e03366e-d509-451f-a48a-e41c633d4416", + UID: "", - ResourceVersion: "1773221391990719009", + ResourceVersion: "", - Generation: 2, + Generation: 0, - CreationTimestamp: v1.Time{Time: s"2026-03-11 09:21:53 +0000 UTC"}, + CreationTimestamp: v1.Time{}, DeletionTimestamp: nil, DeletionGracePeriodSeconds: nil, Labels: nil, - Annotations: map[string]string{ - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFi"..., - }, + Annotations: map[string]string{ + "percona.com/last-config-hash": 
"eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsImxhc3QtYXBwbGllZC1zZWNyZXQiOiI2OWU0M2ZkZTk0NTg1ZjQ3ZmQzMTJjMDNlNjZhNTBmNjg4ZTRiNjA1OWM2Y2RmZWEyZmZiMjM0ZDJiZjAxODhiIiwicGVyY29uYS5jb20vY29uZmlndXJhdGlvbi1oYXNoIjoi"..., + }, OwnerReferences: {{APIVersion: "pxc.percona.com/v1", Kind: "PerconaXtraDBCluster", Name: "monitoring", UID: "229727d6-eec0-4605-a0ad-91ce2629a848", ...}}, Finalizers: nil, - ManagedFields: []v1.ManagedFieldsEntry{ - { - Manager: "percona-xtradb-cluster-operator", - Operation: "Update", - APIVersion: "apps/v1", - Time: s"2026-03-11 09:28:55 +0000 UTC", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:metadata":{"f:annotations":{".":{},"f:percona.com/last-confi`..., - }, - { - Manager: "kube-controller-manager", - Operation: "Update", - APIVersion: "apps/v1", - Time: s"2026-03-11 09:29:51 +0000 UTC", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:status":{"f:availableReplicas":{},"f:collisionCount":{},"f:c`..., - Subresource: "status", - }, - }, + ManagedFields: nil, }, Spec: v1.StatefulSetSpec{ Replicas: &2, Selector: &{MatchLabels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, Template: v1.PodTemplateSpec{ ObjectMeta: v1.ObjectMeta{ ... // 9 identical fields DeletionGracePeriodSeconds: nil, Labels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: map[string]string{ "kubectl.kubernetes.io/default-container": "haproxy", + "last-applied-secret": "69e43fde94585f47fd312c03e66a50f688e4b6059c6cdfea2ffb234d2bf0188b", "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e", "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0", }, OwnerReferences: nil, Finalizers: nil, ManagedFields: nil, }, Spec: v1.PodSpec{ Volumes: []v1.Volume{ { Name: "haproxy-custom", VolumeSource: v1.VolumeSource{ ... // 16 identical fields FC: nil, AzureFile: nil, ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: {Name: "monitoring-haproxy"}, Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, VsphereVolume: nil, Quobyte: nil, ... // 9 identical fields }, }, {Name: "haproxy-auto", VolumeSource: {EmptyDir: &{}}}, { Name: "mysql-users-secret-file", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "internal-monitoring", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &false, }, NFS: nil, ISCSI: nil, ... 
// 22 identical fields }, }, { Name: "my-env-var-secrets", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "my-env-var-secrets", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, {Name: "bin", VolumeSource: {EmptyDir: &{}}}, }, InitContainers: []v1.Container{ { ... // 16 identical fields StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, { ... // 16 identical fields StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, }, Containers: []v1.Container{ { ... // 3 identical fields Args: nil, WorkingDir: "", Ports: []v1.ContainerPort{ { Name: "", HostPort: 0, ContainerPort: 7777, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30100, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30101, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30102, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30103, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30104, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "", HostPort: 0, ContainerPort: 30105, - Protocol: "TCP", + Protocol: "", HostIP: "", }, }, EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}}, Env: []v1.EnvVar{ { Name: "POD_NAME", Value: "", ValueFrom: &v1.EnvVarSource{ FieldRef: &v1.ObjectFieldSelector{ - APIVersion: "v1", + APIVersion: "", FieldPath: "metadata.name", }, ResourceFieldRef: nil, ConfigMapKeyRef: nil, ... // 2 identical fields }, }, { Name: "POD_NAMESPACE", Value: "", ValueFrom: &v1.EnvVarSource{ FieldRef: &v1.ObjectFieldSelector{ - APIVersion: "v1", + APIVersion: "", FieldPath: "metadata.namespace", }, ResourceFieldRef: nil, ConfigMapKeyRef: nil, ... // 2 identical fields }, }, {Name: "PMM_AGENT_SERVER_ADDRESS", Value: "monitoring-service"}, {Name: "PMM_AGENT_SERVER_USERNAME", Value: "service_token"}, ... // 26 identical elements }, Resources: {Limits: {s"cpu": {i: {...}, Format: "DecimalSI"}, s"memory": {i: {...}, Format: "DecimalSI"}}, Requests: {s"cpu": {i: {...}, s: "308m", Format: "DecimalSI"}, s"memory": {i: {...}, s: "508M", Format: "DecimalSI"}}}, ResizePolicy: nil, ... // 2 identical fields VolumeMounts: {{Name: "bin", MountPath: "/var/lib/mysql"}}, VolumeDevices: nil, LivenessProbe: &v1.Probe{ ProbeHandler: v1.ProbeHandler{ Exec: nil, HTTPGet: &v1.HTTPGetAction{ Path: "/local/Status", Port: {IntVal: 7777}, Host: "", - Scheme: "HTTP", + Scheme: "", HTTPHeaders: nil, }, TCPSocket: nil, GRPC: nil, }, InitialDelaySeconds: 300, TimeoutSeconds: 5, ... // 4 identical fields }, ReadinessProbe: &v1.Probe{ ProbeHandler: v1.ProbeHandler{ Exec: nil, HTTPGet: &v1.HTTPGetAction{ Path: "/local/Status", Port: {IntVal: 7777}, Host: "", - Scheme: "HTTP", + Scheme: "", HTTPHeaders: nil, }, TCPSocket: nil, GRPC: nil, }, InitialDelaySeconds: 15, TimeoutSeconds: 15, ... 
// 4 identical fields }, StartupProbe: nil, Lifecycle: &{PreStop: &{Exec: &{Command: {"bash", "-c", "pmm-admin unregister --force"}}}}, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, { ... // 3 identical fields Args: {"haproxy"}, WorkingDir: "", Ports: []v1.ContainerPort{ { Name: "mysql", HostPort: 0, ContainerPort: 3306, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysql-replicas", HostPort: 0, ContainerPort: 3307, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "proxy-protocol", HostPort: 0, ContainerPort: 3309, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysql-admin", HostPort: 0, ContainerPort: 33062, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysqlx", HostPort: 0, ContainerPort: 33060, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "stats", HostPort: 0, ContainerPort: 8404, - Protocol: "TCP", + Protocol: "", HostIP: "", }, }, EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}}, Env: {{Name: "PXC_SERVICE", Value: "monitoring-pxc"}, {Name: "LIVENESS_CHECK_TIMEOUT", Value: "5"}, {Name: "READINESS_CHECK_TIMEOUT", Value: "1"}}, ... // 8 identical fields StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, { ... // 16 identical fields StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, }, EphemeralContainers: nil, - RestartPolicy: "Always", + RestartPolicy: "", TerminationGracePeriodSeconds: &30, ActiveDeadlineSeconds: nil, - DNSPolicy: "ClusterFirst", + DNSPolicy: "", NodeSelector: nil, ServiceAccountName: "default", - DeprecatedServiceAccount: "default", + DeprecatedServiceAccount: "", AutomountServiceAccountToken: nil, NodeName: "", ... // 2 identical fields HostIPC: false, ShareProcessNamespace: nil, - SecurityContext: s"&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,AppArmorProfile:nil,SupplementalGroupsPolicy:nil,SELinux"..., + SecurityContext: nil, ImagePullSecrets: nil, Hostname: "", Subdomain: "", Affinity: nil, - SchedulerName: "default-scheduler", + SchedulerName: "", Tolerations: nil, HostAliases: nil, ... // 6 identical fields PreemptionPolicy: nil, Overhead: nil, - TopologySpreadConstraints: nil, + TopologySpreadConstraints: []v1.TopologySpreadConstraint{}, SetHostnameAsFQDN: nil, OS: nil, ... 
// 6 identical fields }, }, VolumeClaimTemplates: nil, ServiceName: "monitoring-haproxy", - PodManagementPolicy: "OrderedReady", + PodManagementPolicy: "", UpdateStrategy: {Type: "RollingUpdate", RollingUpdate: &{Partition: &0}}, - RevisionHistoryLimit: &10, + RevisionHistoryLimit: nil, MinReadySeconds: 0, - PersistentVolumeClaimRetentionPolicy: s"&StatefulSetPersistentVolumeClaimRetentionPolicy{WhenDeleted:Retain,WhenScaled:Retain,}", + PersistentVolumeClaimRetentionPolicy: nil, Ordinals: nil, }, Status: v1.StatefulSetStatus{ - ObservedGeneration: 2, + ObservedGeneration: 0, - Replicas: 2, + Replicas: 0, - ReadyReplicas: 2, + ReadyReplicas: 0, - CurrentReplicas: 2, + CurrentReplicas: 0, - UpdatedReplicas: 2, + UpdatedReplicas: 0, - CurrentRevision: "monitoring-haproxy-8684668678", + CurrentRevision: "", - UpdateRevision: "monitoring-haproxy-8684668678", + UpdateRevision: "", - CollisionCount: &0, + CollisionCount: nil, Conditions: nil, - AvailableReplicas: 2, + AvailableReplicas: 0, }, } 2026-03-11T09:32:37.015Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "fe1ebb7d-6583-450e-aab4-09373a972908", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-1: dial tcp: lookup monitoring-pxc-1.monitoring-pxc.monitoring-pmm3-7039 on 34.118.224.10:53: no such host"} [mysql] 2026/03/11 09:33:51 packets.go:58 unexpected EOF 2026-03-11T09:34:20.862Z ERROR Reconciler error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "eea95f81-fdc3-43ad-902f-0c0cbb4d3f16", "error": "wrong PXC options: validate cr: ProxySQL: volumeSpec should be specified", "errorVerbose": "ProxySQL: volumeSpec should be specified\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1.(*PerconaXtraDBCluster).Validate\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1/pxc_types.go:483\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1.(*PerconaXtraDBCluster).CheckNSetDefaults\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1/pxc_types.go:1049\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:267\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nvalidate 
cr\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1.(*PerconaXtraDBCluster).CheckNSetDefaults\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1/pxc_types.go:1051\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:267\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nwrong PXC options\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:269\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693"} sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:495 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313 2026-03-11T09:34:20.899Z ERROR Reconciler error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "38eaf306-1ade-4a1a-ad26-41466f78eedc", "error": "wrong PXC options: validate cr: ProxySQL: volumeSpec should be specified", "errorVerbose": "ProxySQL: volumeSpec should be 
specified\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1.(*PerconaXtraDBCluster).Validate\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1/pxc_types.go:483\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1.(*PerconaXtraDBCluster).CheckNSetDefaults\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1/pxc_types.go:1049\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:267\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nvalidate cr\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1.(*PerconaXtraDBCluster).CheckNSetDefaults\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1/pxc_types.go:1051\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:267\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nwrong PXC 
options\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:269\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693"} sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:495 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313
[13 further ERROR "Reconciler error ... wrong PXC options: validate cr: ProxySQL: volumeSpec should be specified" entries with byte-identical errorVerbose stack traces, differing only in timestamp and reconcileID: 2026-03-11T09:34:20.935Z (25001d1a-92db-415c-9705-bcc73b34388c), 09:34:20.981Z (93193e41-92e8-4b46-bd66-cdd01404b3b5), 09:34:21.048Z (ff9a8236-03a0-42ea-9b14-e6519cfaf14d), 09:34:21.154Z (06351392-5960-4af9-aef7-13d7b5f31f63), 09:34:21.341Z (14e07d24-3675-499f-8f2f-21ed6e623f59), 09:34:21.687Z (c34a07b2-c014-4a81-8ded-7d2995202244), 09:34:22.351Z (14ab528a-3469-432b-9c8f-f8daabfdc574), 09:34:23.657Z (b9a65269-3af2-497f-85b5-17114112fc97), 09:34:26.242Z (288a13b9-a9d4-432a-a794-92f8daba5667), 09:34:31.393Z (94313c82-2edb-4153-b8de-66c0dfcd9269), 09:34:41.695Z (cb7f095c-0442-401a-89d3-fdaafa7a8423), 09:35:02.206Z (8943860c-887c-49a5-9aa3-90f1f2c8b54b), 09:35:43.190Z (d2a401cb-4b2c-46da-8212-4f2199431740). The roughly doubling intervals are the controller's exponential requeue backoff.]
cr\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1.(*PerconaXtraDBCluster).CheckNSetDefaults\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1/pxc_types.go:1051\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:267\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nwrong PXC options\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:269\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693"} sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:495 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313 2026-03-11T09:37:05.135Z ERROR Reconciler error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-pmm3-7039"}, "namespace": "monitoring-pmm3-7039", "name": "monitoring", "reconcileID": "cfdf5f96-f1f1-4b01-a6d2-cc3c5bd6f9c8", "error": "wrong PXC options: validate cr: ProxySQL: volumeSpec should be specified", "errorVerbose": "ProxySQL: volumeSpec should be 
specified\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1.(*PerconaXtraDBCluster).Validate\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1/pxc_types.go:483\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1.(*PerconaXtraDBCluster).CheckNSetDefaults\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1/pxc_types.go:1049\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:267\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nvalidate cr\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1.(*PerconaXtraDBCluster).CheckNSetDefaults\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1/pxc_types.go:1051\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:267\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\nwrong PXC 
options\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc.(*ReconcilePerconaXtraDBCluster).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxc/controller.go:269\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:222\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:479\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693"} sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:495 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:438 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.23.1/pkg/internal/controller/controller.go:313 max retry count 120 reached. something went wrong with operator or kubernetes cluster
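Diagnosis: every reconcile attempt failed the same CR validation (spec.proxysql.volumeSpec is unset in the applied PerconaXtraDBCluster resource), so the harness gave up after 120 polls. A minimal remediation sketch, assuming the stock PXC CRD schema for spec.proxysql.volumeSpec.persistentVolumeClaim; the merge patch and the 2Gi request are illustrative assumptions, not commands from this run:

# Hypothetical fix sketch (not part of this log): give ProxySQL a PVC-backed
# volumeSpec so (*PerconaXtraDBCluster).Validate no longer rejects the CR.
# The storage size below is an assumed example value.
kubectl patch pxc monitoring -n monitoring-pmm3-7039 --type=merge \
  -p '{"spec":{"proxysql":{"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"2Gi"}}}}}}}'

Once the CR passes validation, the update event should trigger a fresh reconcile on its own; the operator does not need to be restarted.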