Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/logs/monitoring-2-0-8-0.log Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.35) and server (1.31) exceeds the supported minor version skew of +/-1 + cluster=monitoring + create_infra monitoring-2-0-18368 + local ns=monitoring-2-0-18368 + '[' -n pxc-operator ']' + kubectl get pxc --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n monitoring-2-0-5321 monitoring --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/monitoring patched (no change) + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.ic4Rf4KlyU ++ mktemp + local LAST_ERR=/tmp/tmp.bfhChpEKKO + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ic4Rf4KlyU perconaxtradbcluster.pxc.percona.com "monitoring" deleted from monitoring-2-0-5321 namespace + cat /tmp/tmp.bfhChpEKKO + rm /tmp/tmp.ic4Rf4KlyU /tmp/tmp.bfhChpEKKO + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.kLE8FoS3zR ++ mktemp + local LAST_ERR=/tmp/tmp.6OujiQ5rb6 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.kLE8FoS3zR No resources found + cat /tmp/tmp.6OujiQ5rb6 + rm /tmp/tmp.kLE8FoS3zR /tmp/tmp.6OujiQ5rb6 + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.suQcBoCLW1 ++ mktemp + local LAST_ERR=/tmp/tmp.MJAzwdkiTR + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.suQcBoCLW1 No resources found + cat /tmp/tmp.MJAzwdkiTR + rm /tmp/tmp.suQcBoCLW1 /tmp/tmp.MJAzwdkiTR + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep validate-auth ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get 
clusterrole + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator + xargs kubectl delete ns + awk '{print$1}' + kubectl_bin get ns ++ mktemp + local LAST_OUT=/tmp/tmp.ybD7jSWrJp ++ mktemp + local LAST_ERR=/tmp/tmp.i6N7cCYfzd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns ++ mktemp + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + local LAST_OUT=/tmp/tmp.y2mEAHj4Jk ++ mktemp + local LAST_ERR=/tmp/tmp.U1MJkFO3tw + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ybD7jSWrJp + cat /tmp/tmp.i6N7cCYfzd + rm /tmp/tmp.ybD7jSWrJp /tmp/tmp.i6N7cCYfzd + return 0 namespace "monitoring-2-0-5321" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.y2mEAHj4Jk namespace "pxc-operator" deleted + cat /tmp/tmp.U1MJkFO3tw + rm /tmp/tmp.y2mEAHj4Jk /tmp/tmp.U1MJkFO3tw + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.N2HzxToID5 ++ mktemp + local LAST_ERR=/tmp/tmp.kjwdWJ2iit + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.N2HzxToID5 namespace/pxc-operator created + cat /tmp/tmp.kjwdWJ2iit + rm /tmp/tmp.N2HzxToID5 /tmp/tmp.kjwdWJ2iit + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.djyVTKURh4 +++ mktemp ++ local LAST_ERR=/tmp/tmp.7zVmeh2lW8 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.djyVTKURh4 ++ cat /tmp/tmp.7zVmeh2lW8 ++ rm /tmp/tmp.djyVTKURh4 /tmp/tmp.7zVmeh2lW8 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2323-64f0860f-2-cluster5 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.RhgEegc2tN ++ mktemp + local LAST_ERR=/tmp/tmp.O5vloSB0hb + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2323-64f0860f-2-cluster5 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.RhgEegc2tN Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2323-64f0860f-2-cluster5" 
modified. + cat /tmp/tmp.O5vloSB0hb + rm /tmp/tmp.RhgEegc2tN /tmp/tmp.O5vloSB0hb + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.ix5OVjZIAl ++ mktemp + local LAST_ERR=/tmp/tmp.ILfMKlkegs + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ix5OVjZIAl customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.ILfMKlkegs + rm /tmp/tmp.ix5OVjZIAl /tmp/tmp.ILfMKlkegs + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.RfMezNiJIg ++ mktemp + local LAST_ERR=/tmp/tmp.9v6qKuVQUN + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.RfMezNiJIg clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.9v6qKuVQUN + rm /tmp/tmp.RfMezNiJIg /tmp/tmp.9v6qKuVQUN + return 0 + kubectl_bin apply -f - + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2323-64f0860f^' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/deploy/cw-operator.yaml + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "PXCO_FEATURE_GATES").value) = ""' - ++ mktemp + local LAST_OUT=/tmp/tmp.ikdTKgWFzg ++ mktemp + local LAST_ERR=/tmp/tmp.LQwhEwlXZ9 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ikdTKgWFzg deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.LQwhEwlXZ9 + rm /tmp/tmp.ikdTKgWFzg /tmp/tmp.LQwhEwlXZ9 + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ 
mktemp + local LAST_OUT=/tmp/tmp.lgKPsDc8t7 ++ mktemp + local LAST_ERR=/tmp/tmp.75CuVS9u4X + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.lgKPsDc8t7 pod/percona-xtradb-cluster-operator-5449f85878-ljgfx condition met + cat /tmp/tmp.75CuVS9u4X + rm /tmp/tmp.lgKPsDc8t7 /tmp/tmp.75CuVS9u4X + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name' ++ head -1 ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.ohzoamG5Hv +++ mktemp ++ local LAST_ERR=/tmp/tmp.92K7mTToyB ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ohzoamG5Hv ++ cat /tmp/tmp.92K7mTToyB ++ rm /tmp/tmp.ohzoamG5Hv /tmp/tmp.92K7mTToyB ++ return 0 + wait_pod percona-xtradb-cluster-operator-5449f85878-ljgfx 480 pxc-operator + local pod=percona-xtradb-cluster-operator-5449f85878-ljgfx + local max_retry=480 + local ns=pxc-operator ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo percona-xtradb-cluster-operator-5449f85878-ljgfx + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-5449f85878-ljgfx condition met waiting for pod/percona-xtradb-cluster-operator-5449f85878-ljgfx to become Ready.Ok + sleep 3 + create_namespace monitoring-2-0-18368 + local namespace=monitoring-2-0-18368 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// ++ tail -n1 ++ helm list --all-namespaces --filter chaos-mesh + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get clusterrole ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) 
were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + desc 'cleaned up old namespaces monitoring-2-0-18368' + set +o xtrace + xargs kubectl delete ns + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' ----------------------------------------------------------------------------------- cleaned up old namespaces monitoring-2-0-18368 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace monitoring-2-0-18368 ++ mktemp ++ mktemp + awk '{print$1}' + local LAST_OUT=/tmp/tmp.HZsrOGG4K1 + local LAST_OUT=/tmp/tmp.7YHWoopGtK ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.VdjGjACDdv + local exit_status=0 + local LAST_ERR=/tmp/tmp.7wseWLY6Gy + local exit_status=0 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-2-0-18368 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-2-0-18368 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.7YHWoopGtK + cat /tmp/tmp.7wseWLY6Gy + rm /tmp/tmp.7YHWoopGtK /tmp/tmp.7wseWLY6Gy + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace monitoring-2-0-18368 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.HZsrOGG4K1 + cat /tmp/tmp.VdjGjACDdv Error from server (NotFound): namespaces "monitoring-2-0-18368" not found + rm /tmp/tmp.HZsrOGG4K1 /tmp/tmp.VdjGjACDdv + return 1 + : + wait_for_delete namespace/monitoring-2-0-18368 + local res=namespace/monitoring-2-0-18368 + echo -n 'waiting for namespace/monitoring-2-0-18368 to be deleted' waiting for namespace/monitoring-2-0-18368 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "monitoring-2-0-18368" not found + desc 'create namespace monitoring-2-0-18368' + set +o xtrace ----------------------------------------------------------------------------------- create namespace monitoring-2-0-18368 ----------------------------------------------------------------------------------- + kubectl_bin create namespace monitoring-2-0-18368 ++ mktemp + local LAST_OUT=/tmp/tmp.8P1CbLWdDR ++ mktemp + local LAST_ERR=/tmp/tmp.7Ie0rmbLIx + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace monitoring-2-0-18368 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.8P1CbLWdDR namespace/monitoring-2-0-18368 created + cat /tmp/tmp.7Ie0rmbLIx + rm /tmp/tmp.8P1CbLWdDR /tmp/tmp.7Ie0rmbLIx + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.iMl11Owc18 +++ mktemp ++ local LAST_ERR=/tmp/tmp.8dDqEs98FT ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.iMl11Owc18 ++ cat /tmp/tmp.8dDqEs98FT ++ rm /tmp/tmp.iMl11Owc18 /tmp/tmp.8dDqEs98FT ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2323-64f0860f-2-cluster5 --namespace=monitoring-2-0-18368 ++ mktemp 
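The retry just above (the NotFound namespace delete that is attempted three times before the harness moves on) shows the wrapper that almost every kubectl call in this trace goes through: stdout and stderr are captured into mktemp files, the command is retried up to three times, and the captured output is only printed afterwards. The growing +/++/+++ prefixes are just xtrace nesting depth from command substitutions inside that wrapper. A minimal sketch of the pattern, reconstructed from the visible LAST_OUT/LAST_ERR/seq 0 2 trace rather than copied from the real helper:

# Rough reconstruction of the kubectl_bin retry wrapper implied by the trace;
# the actual helper in the test suite may differ in details.
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            sleep 0    # the trace shows only a zero-length pause between attempts
        else
            break
        fi
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR"
    rm -f "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}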
+ local LAST_OUT=/tmp/tmp.ol29DSABJV ++ mktemp + local LAST_ERR=/tmp/tmp.L8h3FETFqJ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2323-64f0860f-2-cluster5 --namespace=monitoring-2-0-18368 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ol29DSABJV Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2323-64f0860f-2-cluster5" modified. + cat /tmp/tmp.L8h3FETFqJ + rm /tmp/tmp.ol29DSABJV /tmp/tmp.L8h3FETFqJ + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.v49j38DaEF ++ mktemp + local LAST_ERR=/tmp/tmp.7KYHh6aqD3 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.v49j38DaEF secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.7KYHh6aqD3 + rm /tmp/tmp.v49j38DaEF /tmp/tmp.7KYHh6aqD3 + return 0 + deploy_helm monitoring-2-0-18368 + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" already exists with the same configuration, skipping + helm repo add minio https://charts.min.io/ "minio" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "percona" chart repository ...Successfully got an update from the "hashicorp" chart repository ...Successfully got an update from the "chaos-mesh" chart repository Update Complete. ⎈Happy Helming!⎈ + desc 'install PMM Server' + set +o xtrace ----------------------------------------------------------------------------------- install PMM Server ----------------------------------------------------------------------------------- + deploy_pmm_server + '[' '!' -z '' ']' + helm repo add percona https://percona.github.io/percona-helm-charts/ "percona" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "chaos-mesh" chart repository ...Successfully got an update from the "percona" chart repository ...Successfully got an update from the "hashicorp" chart repository Update Complete. 
⎈Happy Helming!⎈ + helm uninstall monitoring Error: uninstall: Release not loaded: monitoring: release: not found + : + retry 10 60 helm install monitoring --set imageRepo=perconalab/pmm-server --set imageTag=dev-latest https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz + local max=10 + local delay=60 + shift 2 + local n=1 + helm install monitoring --set imageRepo=perconalab/pmm-server --set imageTag=dev-latest https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz NAME: monitoring LAST DEPLOYED: Thu Dec 18 10:52:56 2025 NAMESPACE: monitoring-2-0-18368 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster: endpoint: https://monitoring-service.monitoring-2-0-18368.svc.cluster.local:443 login: admin password: admin + wait_for_pmm_service + timeout=420 ++ date +%s + start=1766055177 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1766055178 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1766055181 - start >= timeout )) + sleep 2 + grep -q . + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' ++ date +%s + (( 1766055184 - start >= timeout )) + sleep 2 + grep -q . + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' ++ date +%s + (( 1766055188 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1766055191 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1766055194 - start >= timeout )) + sleep 2 + grep -q . + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' ++ date +%s + (( 1766055197 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1766055200 - start >= timeout )) + sleep 2 + grep -q . + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' ++ date +%s + (( 1766055203 - start >= timeout )) + sleep 2 + grep -q . + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' ++ date +%s + (( 1766055206 - start >= timeout )) + sleep 2 + grep -q . + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' ++ date +%s + (( 1766055209 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . ++ date +%s + (( 1766055213 - start >= timeout )) + sleep 2 + kubectl_bin get svc monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[0]}' + grep -q . 
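The repeating get-svc/grep/sleep block above is the harness waiting for the monitoring-service LoadBalancer to publish an external address, bounded by the 420-second timeout set at the start. A condensed sketch of that loop, using the names visible in the trace (the real helper may differ slightly):

# Poll the PMM service until the cloud load balancer reports an ingress
# address, or give up after $timeout seconds (420 in the trace above).
wait_for_pmm_service() {
    local timeout=420
    local start
    start=$(date +%s)
    until kubectl get svc monitoring-service \
            -o 'jsonpath={.status.loadBalancer.ingress[0]}' | grep -q .; do
        if (( $(date +%s) - start >= timeout )); then
            echo "monitoring-service never got a LoadBalancer ingress" >&2
            return 1
        fi
        sleep 2
    done
}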
+ kubectl_bin wait sts/monitoring '--for=jsonpath={.status.readyReplicas}=1' --timeout=420s ++ mktemp + local LAST_OUT=/tmp/tmp.RCKrB3x2ty ++ mktemp + local LAST_ERR=/tmp/tmp.DfG9ACWcVz + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait sts/monitoring '--for=jsonpath={.status.readyReplicas}=1' --timeout=420s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.RCKrB3x2ty statefulset.apps/monitoring condition met + cat /tmp/tmp.DfG9ACWcVz + rm /tmp/tmp.RCKrB3x2ty /tmp/tmp.DfG9ACWcVz + return 0 ++ kubectl_bin exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zemI6ZqhuQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.aNHyqgEGde ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.zemI6ZqhuQ ++ cat /tmp/tmp.aNHyqgEGde ++ rm /tmp/tmp.zemI6ZqhuQ /tmp/tmp.aNHyqgEGde ++ return 0 + ADMIN_PASSWORD=admin + sleep 5 + kubectl_bin exec monitoring-0 -- bash -c 'grafana-cli --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini admin reset-admin-password admin' ++ mktemp + local LAST_OUT=/tmp/tmp.eUcRBnT3aY ++ mktemp + local LAST_ERR=/tmp/tmp.diiFW4Qx8X + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec monitoring-0 -- bash -c 'grafana-cli --homepath=/usr/share/grafana --config=/etc/grafana/grafana.ini admin reset-admin-password admin' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.eUcRBnT3aY logger=settings t=2025-12-18T10:53:45.670561117Z level=info msg="Starting Grafana" version= commit= branch= compiled=1970-01-01T00:00:00Z logger=settings t=2025-12-18T10:53:45.670700557Z level=info msg="Config loaded from" file=/usr/share/grafana/conf/defaults.ini logger=settings t=2025-12-18T10:53:45.670711387Z level=info msg="Config loaded from" file=/etc/grafana/grafana.ini logger=settings t=2025-12-18T10:53:45.670717117Z level=info msg="Path Home" path=/usr/share/grafana logger=settings t=2025-12-18T10:53:45.670722507Z level=info msg="Path Data" path=/srv/grafana logger=settings t=2025-12-18T10:53:45.670726977Z level=info msg="Path Logs" path=/srv/logs logger=settings t=2025-12-18T10:53:45.670731617Z level=info msg="Path Plugins" path=/srv/grafana/plugins logger=settings t=2025-12-18T10:53:45.670736067Z level=info msg="Path Provisioning" path=/usr/share/grafana/conf/provisioning logger=settings t=2025-12-18T10:53:45.670740837Z level=info msg="App mode production" logger=sqlstore t=2025-12-18T10:53:45.670791077Z level=info msg="Connecting to DB" dbtype=postgres logger=migrator t=2025-12-18T10:53:45.684674153Z level=info msg="Starting DB migrations" logger=migrator t=2025-12-18T10:53:45.690442295Z level=info msg="migrations completed" performed=0 skipped=452 duration=389.33µs logger=secrets t=2025-12-18T10:53:45.692711436Z level=info msg="Envelope encryption state" enabled=true currentprovider=secretKey.v1 logger=plugin.finder t=2025-12-18T10:53:45.72304054Z level=warn msg="Skipping finding plugins as directory does not exist" path=/usr/share/grafana/plugins-bundled logger=plugin.signature.validator t=2025-12-18T10:53:45.841125562Z level=warn msg="Permitting unsigned plugin. 
This is not recommended" pluginID=pmm-qan-app-panel pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-qan logger=plugin.signature.validator t=2025-12-18T10:53:45.841168392Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=grafana-polystat-panel pluginDir=/srv/grafana/plugins/grafana-polystat-panel logger=plugin.signature.validator t=2025-12-18T10:53:45.841193322Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-app pluginDir=/srv/grafana/plugins/pmm-app/dist logger=plugin.pmm-app t=2025-12-18T10:53:45.847106615Z level=warn msg="Included dashboard is missing a UID field" logger=plugin.signature.validator t=2025-12-18T10:53:45.847313945Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-pt-summary-datasource pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-pt-summary/datasource logger=plugin.signature.validator t=2025-12-18T10:53:45.847342985Z level=warn msg="Permitting unsigned plugin. This is not recommended" pluginID=pmm-pt-summary-panel pluginDir=/srv/grafana/plugins/pmm-app/dist/pmm-pt-summary/panel logger=plugin.loader t=2025-12-18T10:53:45.847441325Z level=info msg="Plugin registered" pluginID=grafana-clickhouse-datasource logger=plugin.grafana-clickhouse-datasource t=2025-12-18T10:53:45.856754089Z level=warn msg="Plugin process is running with elevated privileges. This is not recommended" logger=plugin.loader t=2025-12-18T10:53:45.856773679Z level=info msg="Plugin registered" pluginID=grafana-worldmap-panel logger=plugin.loader t=2025-12-18T10:53:45.856781779Z level=info msg="Plugin registered" pluginID=pmm-qan-app-panel logger=plugin.loader t=2025-12-18T10:53:45.856787409Z level=info msg="Plugin registered" pluginID=grafana-polystat-panel logger=plugin.loader t=2025-12-18T10:53:45.856885899Z level=info msg="Plugin registered" pluginID=jdbranham-diagram-panel logger=plugin.loader t=2025-12-18T10:53:45.856893559Z level=info msg="Plugin registered" pluginID=pmm-app logger=plugin.loader t=2025-12-18T10:53:45.856899379Z level=info msg="Plugin registered" pluginID=pmm-pt-summary-datasource logger=plugin.loader t=2025-12-18T10:53:45.856905339Z level=info msg="Plugin registered" pluginID=pmm-pt-summary-panel logger=plugin.loader t=2025-12-18T10:53:45.856910449Z level=info msg="Plugin registered" pluginID=natel-discrete-panel logger=plugin.loader t=2025-12-18T10:53:45.856917099Z level=info msg="Plugin registered" pluginID=camptocamp-prometheus-alertmanager-datasource logger=plugin.loader t=2025-12-18T10:53:45.856922319Z level=info msg="Plugin registered" pluginID=grafana-piechart-panel logger=plugin.loader t=2025-12-18T10:53:45.856927199Z level=info msg="Plugin registered" pluginID=petrslavotinek-carpetplot-panel Admin password changed successfully ✔ + cat /tmp/tmp.diiFW4Qx8X + rm /tmp/tmp.eUcRBnT3aY /tmp/tmp.diiFW4Qx8X + return 0 + desc 'create PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create PXC cluster ----------------------------------------------------------------------------------- + spinup_pxc monitoring /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/conf/monitoring.yml 3 120 /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/conf/secrets.yaml + local cluster=monitoring + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/conf/monitoring.yml + local size=3 + local sleep=120 + local 
secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/conf/secrets.yaml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/conf/secrets.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.WxOhC1ji7D ++ mktemp + local LAST_ERR=/tmp/tmp.BNJNL0Cgw7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/conf/secrets.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.WxOhC1ji7D secret/my-cluster-secrets created + cat /tmp/tmp.BNJNL0Cgw7 + rm /tmp/tmp.WxOhC1ji7D /tmp/tmp.BNJNL0Cgw7 + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/conf/client.yml + local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/conf/client.yml + local pvc_name= + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/conf/client.yml '' + local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/conf/client.yml + local pvc_name= + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' ++ mktemp + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + local LAST_OUT=/tmp/tmp.fLU64toySS + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.monitoring-2-0-18368~ + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' ++ mktemp + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + local LAST_ERR=/tmp/tmp.cA2k2zD3eI + local exit_status=0 + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2323-64f0860f#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.fLU64toySS deployment.apps/pxc-client created + cat /tmp/tmp.cA2k2zD3eI + rm /tmp/tmp.fLU64toySS /tmp/tmp.cA2k2zD3eI + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/conf/monitoring.yml + local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/conf/monitoring.yml + local pvc_name= + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/conf/monitoring.yml '' + local 
input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/conf/monitoring.yml + local pvc_name= + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/conf/monitoring.yml ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + local LAST_OUT=/tmp/tmp.OvOkZyWOOr + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2323-64f0860f#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.monitoring-2-0-18368~ + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' ++ mktemp + local LAST_ERR=/tmp/tmp.ETSqjqsK3H + local exit_status=0 ++ seq 0 2 + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.OvOkZyWOOr perconaxtradbcluster.pxc.percona.com/monitoring created + cat /tmp/tmp.ETSqjqsK3H + rm /tmp/tmp.OvOkZyWOOr /tmp/tmp.ETSqjqsK3H + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy monitoring ++ local target_cluster=monitoring +++ kubectl_bin get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.GAOXQLnrZ6 ++++ mktemp +++ local LAST_ERR=/tmp/tmp.uRz7bW1jni +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.GAOXQLnrZ6 +++ cat /tmp/tmp.uRz7bW1jni +++ rm /tmp/tmp.GAOXQLnrZ6 /tmp/tmp.uRz7bW1jni +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo monitoring-haproxy ++ return + local proxy=monitoring-haproxy + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-2-0-18368 ++ mktemp + local LAST_OUT=/tmp/tmp.p5evxaiqji ++ mktemp + local LAST_ERR=/tmp/tmp.0uiQ8SL3eS + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n monitoring-2-0-18368 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.p5evxaiqji pod/monitoring-haproxy-0 condition met pod/monitoring-pxc-0 condition met + cat /tmp/tmp.0uiQ8SL3eS + rm /tmp/tmp.p5evxaiqji /tmp/tmp.0uiQ8SL3eS + return 0 + wait_for_running monitoring-haproxy 1 + local name=monitoring-haproxy + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o 
xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-haproxy-0 480 + local pod=monitoring-haproxy-0 + local max_retry=480 + local ns= ++ echo monitoring-haproxy-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/monitoring-haproxy-0 condition met waiting for pod/monitoring-haproxy-0 to become Ready.Ok + wait_for_running monitoring-pxc 3 + local name=monitoring-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-0 480 + local pod=monitoring-pxc-0 + local max_retry=480 + local ns= ++ echo monitoring-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/monitoring-pxc-0 condition met waiting for pod/monitoring-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-1 480 + local pod=monitoring-pxc-1 + local max_retry=480 + local ns= ++ grep -E '^(pxc|proxysql)$' ++ echo monitoring-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/monitoring-pxc-1 condition met waiting for pod/monitoring-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod monitoring-pxc-2 480 + local pod=monitoring-pxc-2 + local max_retry=480 + local ns= ++ echo monitoring-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/monitoring-pxc-2 condition met waiting for pod/monitoring-pxc-2 to become Ready.Ok + sleep 120 ++ kubectl get pxc monitoring -o 'jsonpath={.spec.secretsName}' + local secret_name=my-cluster-secrets ++ getSecretData my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.qRNaNUKmSC +++ mktemp ++ local LAST_ERR=/tmp/tmp.MEYvgBMmkA ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.qRNaNUKmSC ++ cat /tmp/tmp.MEYvgBMmkA ++ rm /tmp/tmp.qRNaNUKmSC /tmp/tmp.MEYvgBMmkA ++ return 0 + local root_pass=root_password + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local 
LAST_OUT=/tmp/tmp.avC0IkxFxl +++ mktemp ++ local LAST_ERR=/tmp/tmp.dzR0VRf9t2 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.avC0IkxFxl ++ cat /tmp/tmp.dzR0VRf9t2 ++ rm /tmp/tmp.avC0IkxFxl /tmp/tmp.dzR0VRf9t2 ++ return 0 + client_pod=pxc-client-c75dc5c46-fxms5 + wait_pod pxc-client-c75dc5c46-fxms5 + local pod=pxc-client-c75dc5c46-fxms5 + local max_retry=480 + local ns= ++ echo pxc-client-c75dc5c46-fxms5 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-c75dc5c46-fxms5 condition met waiting for pod/pxc-client-c75dc5c46-fxms5 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h monitoring-haproxy -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.r8sx9KBEjd +++ mktemp ++ local LAST_ERR=/tmp/tmp.RLH12CQKwb ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.r8sx9KBEjd ++ cat /tmp/tmp.RLH12CQKwb ++ rm /tmp/tmp.r8sx9KBEjd /tmp/tmp.RLH12CQKwb ++ return 0 + client_pod=pxc-client-c75dc5c46-fxms5 + wait_pod pxc-client-c75dc5c46-fxms5 + local pod=pxc-client-c75dc5c46-fxms5 + local max_retry=480 + local ns= ++ echo pxc-client-c75dc5c46-fxms5 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-c75dc5c46-fxms5 condition met waiting for pod/pxc-client-c75dc5c46-fxms5 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-0.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qkmvIwyqx0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Y74eRluKf7 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 
'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.qkmvIwyqx0 ++ cat /tmp/tmp.Y74eRluKf7 ++ rm /tmp/tmp.qkmvIwyqx0 /tmp/tmp.Y74eRluKf7 ++ return 0 + client_pod=pxc-client-c75dc5c46-fxms5 + wait_pod pxc-client-c75dc5c46-fxms5 + local pod=pxc-client-c75dc5c46-fxms5 + local max_retry=480 + local ns= ++ echo pxc-client-c75dc5c46-fxms5 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-c75dc5c46-fxms5 condition met waiting for pod/pxc-client-c75dc5c46-fxms5 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! -s /tmp/tmp.u1cffxxmJX/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.u1cffxxmJX/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/select-1.sql /tmp/tmp.u1cffxxmJX/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-1.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.htMEAzPkUi +++ mktemp ++ local LAST_ERR=/tmp/tmp.kqHNfjwgPJ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.htMEAzPkUi ++ cat /tmp/tmp.kqHNfjwgPJ ++ rm /tmp/tmp.htMEAzPkUi /tmp/tmp.kqHNfjwgPJ ++ return 0 + client_pod=pxc-client-c75dc5c46-fxms5 + wait_pod pxc-client-c75dc5c46-fxms5 + local pod=pxc-client-c75dc5c46-fxms5 + local max_retry=480 + local ns= ++ grep -E '^(pxc|proxysql)$' ++ echo pxc-client-c75dc5c46-fxms5 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-c75dc5c46-fxms5 condition met waiting for pod/pxc-client-c75dc5c46-fxms5 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.u1cffxxmJX/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.u1cffxxmJX/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/select-1.sql /tmp/tmp.u1cffxxmJX/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/select-1-80.sql ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + run_mysql 'SELECT * from myApp.myApp;' '-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h monitoring-pxc-2.monitoring-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JeqdikjU1X +++ mktemp ++ local LAST_ERR=/tmp/tmp.4A6eukl4ro ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.JeqdikjU1X ++ cat /tmp/tmp.4A6eukl4ro ++ rm /tmp/tmp.JeqdikjU1X /tmp/tmp.4A6eukl4ro ++ return 0 + client_pod=pxc-client-c75dc5c46-fxms5 + wait_pod pxc-client-c75dc5c46-fxms5 + local pod=pxc-client-c75dc5c46-fxms5 + local max_retry=480 + local ns= ++ echo pxc-client-c75dc5c46-fxms5 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-c75dc5c46-fxms5 condition met waiting for pod/pxc-client-c75dc5c46-fxms5 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.u1cffxxmJX/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.u1cffxxmJX/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/select-1.sql /tmp/tmp.u1cffxxmJX/select-1.sql + is_keyring_plugin_in_use monitoring + local cluster=monitoring + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + kubectl exec -it monitoring-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' + grep -E -o 'early-plugin-load=keyring_\w+.so' Unable to use a TTY - input is not a terminal or the right kind of file + return 1 + desc 'add PMM API key to secret' + set +o xtrace ----------------------------------------------------------------------------------- add PMM API key to secret ----------------------------------------------------------------------------------- ++ jq .key +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.KtDNkw18wq +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.1XJKLN3du0 ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.KtDNkw18wq ++++ cat /tmp/tmp.1XJKLN3du0 ++++ rm /tmp/tmp.KtDNkw18wq /tmp/tmp.1XJKLN3du0 ++++ return 0 +++ local endpoint= +++ '[' -z '' ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.vyxZoIw9c7 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.cERub21XZm ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.vyxZoIw9c7 ++++ cat /tmp/tmp.cERub21XZm ++++ rm /tmp/tmp.vyxZoIw9c7 /tmp/tmp.cERub21XZm ++++ return 0 +++ endpoint=35.222.192.160 +++ '[' -n 35.222.192.160 ']' +++ '[' 35.222.192.160 '!=' null ']' +++ echo 35.222.192.160 +++ head -n 1 +++ sed -e 's/^"//; s/"$//;' +++ return ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@35.222.192.160/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 155 100 119 100 36 268 81 --:--:-- --:--:-- --:--:-- 349 + API_KEY='"eyJrIjoiTE1iUm9HRUYydDdFS0xMeXI3SnFXRU9sR2FYSlZYVDEiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="' + kubectl_bin patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoiTE1iUm9HRUYydDdFS0xMeXI3SnFXRU9sR2FYSlZYVDEiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' ++ mktemp + local LAST_OUT=/tmp/tmp.7S4teKPwGC ++ mktemp + local LAST_ERR=/tmp/tmp.0M4r3reyRA + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoiTE1iUm9HRUYydDdFS0xMeXI3SnFXRU9sR2FYSlZYVDEiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.7S4teKPwGC secret/my-cluster-secrets patched + cat /tmp/tmp.0M4r3reyRA + rm /tmp/tmp.7S4teKPwGC /tmp/tmp.0M4r3reyRA + return 0 + wait_for_generation sts/monitoring-pxc 2 + local 
resource=sts/monitoring-pxc + local target_generation=2 + echo 'Waiting for sts/monitoring-pxc to reach generation 2...' Waiting for sts/monitoring-pxc to reach generation 2... + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=1 + '[' 1 -eq 2 ']' + echo 'Resource sts/monitoring-pxc is at generation 1. Waiting...' Resource sts/monitoring-pxc is at generation 1. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 2 ']' + echo 'Resource sts/monitoring-pxc has reached generation 2.' Resource sts/monitoring-pxc has reached generation 2. + break + wait_for_generation sts/monitoring-haproxy 2 + local resource=sts/monitoring-haproxy + local target_generation=2 + echo 'Waiting for sts/monitoring-haproxy to reach generation 2...' Waiting for sts/monitoring-haproxy to reach generation 2... + true ++ kubectl get sts/monitoring-haproxy -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 2 ']' + echo 'Resource sts/monitoring-haproxy has reached generation 2.' Resource sts/monitoring-haproxy has reached generation 2. + break + sleep 10 + kubectl wait pod -l app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --for=condition=ready --timeout=600s pod/monitoring-haproxy-0 condition met pod/monitoring-haproxy-1 condition met pod/monitoring-pxc-0 condition met pod/monitoring-pxc-1 condition met pod/monitoring-pxc-2 condition met + wait_cluster_consistency monitoring 3 2 + local cluster_name=monitoring + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/monitoring to be ready' waiting for pxc/monitoring to be ready++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.y7aKqlmtja +++ mktemp ++ local LAST_ERR=/tmp/tmp.xNJgIfJnhk ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.y7aKqlmtja ++ cat /tmp/tmp.xNJgIfJnhk ++ rm /tmp/tmp.y7aKqlmtja /tmp/tmp.xNJgIfJnhk ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 0 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.CdblyQd6X2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.eBpUla3QuF ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.CdblyQd6X2 ++ cat /tmp/tmp.eBpUla3QuF ++ rm /tmp/tmp.CdblyQd6X2 /tmp/tmp.eBpUla3QuF ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
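The generation checks earlier in this step amount to polling .metadata.generation until the operator has rewritten the StatefulSet spec (the secret patch bumps monitoring-pxc from generation 1 to 2). Roughly, following the echo messages visible in the trace:

# Wait until a resource's spec generation reaches the expected value;
# reconstructed from the wait_for_generation trace above.
wait_for_generation() {
    local resource=$1 target_generation=$2
    echo "Waiting for ${resource} to reach generation ${target_generation}..."
    while true; do
        local current_generation
        current_generation=$(kubectl get "$resource" -o 'jsonpath={.metadata.generation}')
        if [ "$current_generation" -eq "$target_generation" ]; then
            echo "Resource ${resource} has reached generation ${target_generation}."
            break
        fi
        echo "Resource ${resource} is at generation ${current_generation}. Waiting..."
        sleep 5
    done
}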
.+ sleep 5 + [[ 1 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.KVxxhqfZOX +++ mktemp ++ local LAST_ERR=/tmp/tmp.YO9lpxC5h3 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.KVxxhqfZOX ++ cat /tmp/tmp.YO9lpxC5h3 ++ rm /tmp/tmp.KVxxhqfZOX /tmp/tmp.YO9lpxC5h3 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 2 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Ha5QBOgA2N +++ mktemp ++ local LAST_ERR=/tmp/tmp.8N8m9Iifr2 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Ha5QBOgA2N ++ cat /tmp/tmp.8N8m9Iifr2 ++ rm /tmp/tmp.Ha5QBOgA2N /tmp/tmp.8N8m9Iifr2 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 3 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3ZtSY3V66w +++ mktemp ++ local LAST_ERR=/tmp/tmp.4Khuql6dH6 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.3ZtSY3V66w ++ cat /tmp/tmp.4Khuql6dH6 ++ rm /tmp/tmp.3ZtSY3V66w /tmp/tmp.4Khuql6dH6 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 4 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.2BFFphXqV8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.snYqNCqRey ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.2BFFphXqV8 ++ cat /tmp/tmp.snYqNCqRey ++ rm /tmp/tmp.2BFFphXqV8 /tmp/tmp.snYqNCqRey ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 5 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zyyTlqfz5A +++ mktemp ++ local LAST_ERR=/tmp/tmp.8qfl6BEpL0 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.zyyTlqfz5A ++ cat /tmp/tmp.8qfl6BEpL0 ++ rm /tmp/tmp.zyyTlqfz5A /tmp/tmp.8qfl6BEpL0 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 6 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.sM0jwvwfDE +++ mktemp ++ local LAST_ERR=/tmp/tmp.JoZdOPpcDg ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.sM0jwvwfDE ++ cat /tmp/tmp.JoZdOPpcDg ++ rm /tmp/tmp.sM0jwvwfDE /tmp/tmp.JoZdOPpcDg ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
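The rolling restart being polled out here was triggered a few steps earlier, when an Admin-role API key was created in PMM's Grafana and written into my-cluster-secrets as pmmserverkey; the operator reacts by reconfiguring the pmm-client sidecars. A hedged sketch of that step, with endpoint discovery simplified to the LoadBalancer IP as seen in the trace (credentials admin:admin match the chart defaults printed above):

# Create a Grafana API key on the PMM server and store it in the cluster
# secret so the operator can enable pmm-client on the pods.
ENDPOINT=$(kubectl get service/monitoring-service \
    -o 'jsonpath={.status.loadBalancer.ingress[].ip}')
API_KEY=$(curl --insecure -X POST -H 'Content-Type: application/json' \
    -d '{"name":"operator", "role": "Admin"}' \
    "https://admin:admin@${ENDPOINT}/graph/api/auth/keys" | jq -r .key)
kubectl patch secret my-cluster-secrets --type merge \
    --patch "{\"stringData\": {\"pmmserverkey\": \"${API_KEY}\"}}"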
.+ sleep 5 + [[ 7 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.dpujsqDI3K +++ mktemp ++ local LAST_ERR=/tmp/tmp.AhPnEwtbto ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.dpujsqDI3K ++ cat /tmp/tmp.AhPnEwtbto ++ rm /tmp/tmp.dpujsqDI3K /tmp/tmp.AhPnEwtbto ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 8 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.aLHdK36Qap +++ mktemp ++ local LAST_ERR=/tmp/tmp.9PRZX274t5 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.aLHdK36Qap ++ cat /tmp/tmp.9PRZX274t5 ++ rm /tmp/tmp.aLHdK36Qap /tmp/tmp.9PRZX274t5 ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 9 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.NTOFnuPMyV +++ mktemp ++ local LAST_ERR=/tmp/tmp.xUtaKmuljl ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.NTOFnuPMyV ++ cat /tmp/tmp.xUtaKmuljl ++ rm /tmp/tmp.NTOFnuPMyV /tmp/tmp.xUtaKmuljl ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 10 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.nYzODHxSCh +++ mktemp ++ local LAST_ERR=/tmp/tmp.8NUJJZnmWl ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.nYzODHxSCh ++ cat /tmp/tmp.8NUJJZnmWl ++ rm /tmp/tmp.nYzODHxSCh /tmp/tmp.8NUJJZnmWl ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 11 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.JfE3FRcmJf +++ mktemp ++ local LAST_ERR=/tmp/tmp.NzztQM8sHZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.JfE3FRcmJf ++ cat /tmp/tmp.NzztQM8sHZ ++ rm /tmp/tmp.JfE3FRcmJf /tmp/tmp.NzztQM8sHZ ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 12 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.LNkvaBWYCI +++ mktemp ++ local LAST_ERR=/tmp/tmp.3Yhz7zjz9T ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.LNkvaBWYCI ++ cat /tmp/tmp.3Yhz7zjz9T ++ rm /tmp/tmp.LNkvaBWYCI /tmp/tmp.3Yhz7zjz9T ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
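Annotation: the dots being printed here are wait_cluster_consistency. It re-reads .status.state of the pxc custom resource every 5 seconds, gives up after 300 iterations, and, once the state reports ready, additionally checks that .status.pxc.ready and .status.haproxy.ready match the requested sizes (3 PXC and 2 HAProxy pods in this test). Approximately, with the same caveats as the sketches above:

wait_cluster_consistency_sketch() {
    local cluster_name=$1 cluster_size=$2 proxy_size=$3
    local i=0 max=300
    until [[ $(kubectl get pxc "${cluster_name}" -o 'jsonpath={.status.state}') == "ready" ]]; do
        if [[ $i -ge $max ]]; then
            echo "cluster ${cluster_name} did not become ready in time" >&2
            return 1
        fi
        echo -n .
        sleep 5
        let i+=1
    done
    # once ready, the replica counters must also match the requested sizes
    [[ $(kubectl get pxc "${cluster_name}" -o 'jsonpath={.status.pxc.ready}') == "${cluster_size}" ]]
    [[ $(kubectl get pxc "${cluster_name}" -o 'jsonpath={.status.haproxy.ready}') == "${proxy_size}" ]]
}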
.+ sleep 5 + [[ 13 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Uh3PISi534 +++ mktemp ++ local LAST_ERR=/tmp/tmp.mtH5KlKmFs ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Uh3PISi534 ++ cat /tmp/tmp.mtH5KlKmFs ++ rm /tmp/tmp.Uh3PISi534 /tmp/tmp.mtH5KlKmFs ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . .+ sleep 5 + [[ 14 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.3Yxa8G1es1 +++ mktemp ++ local LAST_ERR=/tmp/tmp.kL6P8LcqkF ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.3Yxa8G1es1 ++ cat /tmp/tmp.kL6P8LcqkF ++ rm /tmp/tmp.3Yxa8G1es1 /tmp/tmp.kL6P8LcqkF ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zIgXtBQnqL +++ mktemp ++ local LAST_ERR=/tmp/tmp.s7LuG2DzPh ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.zIgXtBQnqL ++ cat /tmp/tmp.s7LuG2DzPh ++ rm /tmp/tmp.zIgXtBQnqL /tmp/tmp.s7LuG2DzPh ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine monitoring +++ local cluster_name=monitoring ++++ get_proxy monitoring ++++ local target_cluster=monitoring +++++ kubectl_bin get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.hDVrZoUlK5 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.nmiXBSMbou +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc monitoring -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.hDVrZoUlK5 +++++ cat /tmp/tmp.nmiXBSMbou +++++ rm /tmp/tmp.hDVrZoUlK5 /tmp/tmp.nmiXBSMbou +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo monitoring-haproxy ++++ return +++ local cluster_proxy=monitoring-haproxy +++ echo haproxy ++ kubectl_bin get pxc monitoring -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.qqqyVWPO72 +++ mktemp ++ local LAST_ERR=/tmp/tmp.6RHfqDKKki ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc monitoring -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.qqqyVWPO72 ++ cat /tmp/tmp.6RHfqDKKki ++ rm /tmp/tmp.qqqyVWPO72 /tmp/tmp.6RHfqDKKki ++ return 0 + [[ 2 == \2 ]] + echo + compare_kubectl statefulset/monitoring-pxc -no-prefix + local resource=statefulset/monitoring-pxc + local postfix=-no-prefix + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix.yml + local new_result=/tmp/tmp.u1cffxxmJX/statefulset_monitoring-pxc.yml + desc 'compare statefulset/monitoring-pxc--no-prefix' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-pxc--no-prefix ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f 
/mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k129.yml ']' + version_gt 1.27 ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k127-oc.yml ']' + version_gt 1.29 ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-pxc ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") | del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. 
| select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-18368", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. 
| select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.9o1zedkQSC ++ mktemp + local LAST_ERR=/tmp/tmp.MSAZxXWbjg + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.9o1zedkQSC + cat /tmp/tmp.MSAZxXWbjg + rm /tmp/tmp.9o1zedkQSC /tmp/tmp.MSAZxXWbjg + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-no-prefix-k127.yml /tmp/tmp.u1cffxxmJX/statefulset_monitoring-pxc.yml + log 'compare_kubectl: statefulset/monitoring-pxc OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2025-12-18T11:03:08+0000]' compare_kubectl: statefulset/monitoring-pxc OK [2025-12-18T11:03:08+0000] compare_kubectl: statefulset/monitoring-pxc OK + compare_kubectl statefulset/monitoring-haproxy -no-prefix + local resource=statefulset/monitoring-haproxy + local postfix=-no-prefix + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix.yml + local new_result=/tmp/tmp.u1cffxxmJX/statefulset_monitoring-haproxy.yml + desc 'compare statefulset/monitoring-haproxy--no-prefix' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-haproxy--no-prefix ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k129.yml ']' + version_gt 1.27 ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k127-oc.yml ']' + version_gt 1.29 ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-haproxy + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. 
| select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") | del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. 
| select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-18368", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.kn4K87WhGZ ++ mktemp + local LAST_ERR=/tmp/tmp.DmDyFBWFEU + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.kn4K87WhGZ + cat /tmp/tmp.DmDyFBWFEU + rm /tmp/tmp.kn4K87WhGZ /tmp/tmp.DmDyFBWFEU + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-no-prefix-k127.yml /tmp/tmp.u1cffxxmJX/statefulset_monitoring-haproxy.yml + log 'compare_kubectl: statefulset/monitoring-haproxy OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2025-12-18T11:03:09+0000]' compare_kubectl: statefulset/monitoring-haproxy OK [2025-12-18T11:03:09+0000] compare_kubectl: statefulset/monitoring-haproxy OK + desc 'apply my-env-var-secrets to add PMM_PREFIX' + set +o xtrace ----------------------------------------------------------------------------------- apply my-env-var-secrets to add PMM_PREFIX ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/conf/envsecrets.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.SLaNERX9fS ++ mktemp + local LAST_ERR=/tmp/tmp.axJjGplaM5 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/conf/envsecrets.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.SLaNERX9fS secret/my-env-var-secrets created + cat /tmp/tmp.axJjGplaM5 + rm /tmp/tmp.SLaNERX9fS /tmp/tmp.axJjGplaM5 + return 0 + wait_for_generation sts/monitoring-pxc 3 + local resource=sts/monitoring-pxc + local target_generation=3 + echo 'Waiting for sts/monitoring-pxc to reach generation 3...' Waiting for sts/monitoring-pxc to reach generation 3... + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=2 + '[' 2 -eq 3 ']' + echo 'Resource sts/monitoring-pxc is at generation 2. Waiting...' Resource sts/monitoring-pxc is at generation 2. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 3 ']' + echo 'Resource sts/monitoring-pxc has reached generation 3.' Resource sts/monitoring-pxc has reached generation 3. + break + wait_for_generation sts/monitoring-haproxy 3 + local resource=sts/monitoring-haproxy + local target_generation=3 + echo 'Waiting for sts/monitoring-haproxy to reach generation 3...' Waiting for sts/monitoring-haproxy to reach generation 3... + true ++ kubectl get sts/monitoring-haproxy -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 3 ']' + echo 'Resource sts/monitoring-haproxy has reached generation 3.' Resource sts/monitoring-haproxy has reached generation 3. 
+ break + desc 'add new PMM API key to secret' + set +o xtrace ----------------------------------------------------------------------------------- add new PMM API key to secret ----------------------------------------------------------------------------------- ++ jq .key +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.WqqjkmHD5T +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.rQMNvvD5mR ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.WqqjkmHD5T ++++ cat /tmp/tmp.rQMNvvD5mR ++++ rm /tmp/tmp.WqqjkmHD5T /tmp/tmp.rQMNvvD5mR ++++ return 0 +++ local endpoint= +++ '[' -z '' ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.zD3JHWbfQe +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.ckoM7RBwT7 ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.zD3JHWbfQe ++++ cat /tmp/tmp.ckoM7RBwT7 ++++ rm /tmp/tmp.zD3JHWbfQe /tmp/tmp.ckoM7RBwT7 ++++ return 0 +++ endpoint=35.222.192.160 +++ '[' -n 35.222.192.160 ']' +++ '[' 35.222.192.160 '!=' null ']' +++ echo 35.222.192.160 +++ head -n 1 +++ sed -e 's/^"//; s/"$//;' +++ return ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator-new", "role": "Admin"}' https://admin:admin@35.222.192.160/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 167 100 127 100 40 292 92 --:--:-- --:--:-- --:--:-- 383 + API_KEY_NEW='"eyJrIjoiYU9QRTJmMUhUM0JXQjBTYUhKWEQ0R21wOEtXbjUyM0IiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9"' + kubectl_bin patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoiYU9QRTJmMUhUM0JXQjBTYUhKWEQ0R21wOEtXbjUyM0IiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9"}}' ++ mktemp + local LAST_OUT=/tmp/tmp.BndNrBAkBH ++ mktemp + local LAST_ERR=/tmp/tmp.mKWiBMXpKQ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch secret my-cluster-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoiYU9QRTJmMUhUM0JXQjBTYUhKWEQ0R21wOEtXbjUyM0IiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9"}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.BndNrBAkBH secret/my-cluster-secrets patched + cat /tmp/tmp.mKWiBMXpKQ + rm /tmp/tmp.BndNrBAkBH /tmp/tmp.mKWiBMXpKQ + return 0 + desc 'delete old PMM key' + set +o xtrace ----------------------------------------------------------------------------------- delete old PMM key ----------------------------------------------------------------------------------- ++ jq '.[] | select( .name == "operator").id' +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.nHo6heKSob +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.zjYRavhulD ++++ local exit_status=0 +++++ 
seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.nHo6heKSob ++++ cat /tmp/tmp.zjYRavhulD ++++ rm /tmp/tmp.nHo6heKSob /tmp/tmp.zjYRavhulD ++++ return 0 +++ local endpoint= +++ '[' -z '' ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.PMT4Y5atV3 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.pwvzLbY0rF ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.PMT4Y5atV3 ++++ cat /tmp/tmp.pwvzLbY0rF ++++ rm /tmp/tmp.PMT4Y5atV3 /tmp/tmp.pwvzLbY0rF ++++ return 0 +++ endpoint=35.222.192.160 +++ '[' -n 35.222.192.160 ']' +++ '[' 35.222.192.160 '!=' null ']' +++ echo 35.222.192.160 +++ head -n 1 +++ sed -e 's/^"//; s/"$//;' +++ return ++ curl --insecure -X GET https://admin:admin@35.222.192.160/graph/api/auth/keys % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 602 100 602 0 0 1383 0 --:--:-- --:--:-- --:--:-- 1387 + ID_API_KEY_OLD=6 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.3K4xs9gtsI ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QvTh5qB2N6 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.3K4xs9gtsI +++ cat /tmp/tmp.QvTh5qB2N6 +++ rm /tmp/tmp.3K4xs9gtsI /tmp/tmp.QvTh5qB2N6 +++ return 0 ++ local endpoint= ++ '[' -z '' ']' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.bG5V1gaC0b ++++ mktemp +++ local LAST_ERR=/tmp/tmp.mZSAemfl0t +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.bG5V1gaC0b +++ cat /tmp/tmp.mZSAemfl0t +++ rm /tmp/tmp.bG5V1gaC0b /tmp/tmp.mZSAemfl0t +++ return 0 ++ endpoint=35.222.192.160 ++ '[' -n 35.222.192.160 ']' ++ '[' 35.222.192.160 '!=' null ']' ++ echo 35.222.192.160 ++ head -n 1 ++ sed -e 's/^"//; s/"$//;' ++ return + curl --insecure -X DELETE https://admin:admin@35.222.192.160/graph/api/auth/keys/6 % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 29 100 29 0 0 59 0 --:--:-- --:--:-- --:--:-- 59 {"message":"API key deleted"}+ wait_for_generation sts/monitoring-pxc 4 + local resource=sts/monitoring-pxc + local target_generation=4 + echo 'Waiting for sts/monitoring-pxc to reach generation 4...' Waiting for sts/monitoring-pxc to reach generation 4... + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' 
Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... 
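Annotation: the generation-4 rollout being waited on in this loop was triggered by rotating the PMM API key a few lines up: a new key is created through the Grafana API exposed by monitoring-service, stored in the pmmserverkey field of my-cluster-secrets (which the operator picks up, rolling the pods), and the old "operator" key is then deleted by id. In outline, using the same endpoints as the trace (admin:admin and the LoadBalancer IP are specific to this disposable test environment):

endpoint=$(kubectl get service/monitoring-service \
    -o 'jsonpath={.status.loadBalancer.ingress[].ip}')

# create a new Grafana API key and put it into the cluster secret
new_key=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
    -d '{"name":"operator-new", "role": "Admin"}' \
    "https://admin:admin@${endpoint}/graph/api/auth/keys" | jq -r .key)
kubectl patch secret my-cluster-secrets --type merge \
    --patch "{\"stringData\": {\"pmmserverkey\": \"${new_key}\"}}"

# look up the previous key ("operator") and delete it by id
old_id=$(curl --insecure -s -X GET "https://admin:admin@${endpoint}/graph/api/auth/keys" \
    | jq '.[] | select(.name == "operator").id')
curl --insecure -s -X DELETE "https://admin:admin@${endpoint}/graph/api/auth/keys/${old_id}"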
+ sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=3 + '[' 3 -eq 4 ']' + echo 'Resource sts/monitoring-pxc is at generation 3. Waiting...' Resource sts/monitoring-pxc is at generation 3. Waiting... + sleep 5 + true ++ kubectl get sts/monitoring-pxc -o 'jsonpath={.metadata.generation}' + current_generation=4 + '[' 4 -eq 4 ']' + echo 'Resource sts/monitoring-pxc has reached generation 4.' Resource sts/monitoring-pxc has reached generation 4. + break + wait_for_generation sts/monitoring-haproxy 4 + local resource=sts/monitoring-haproxy + local target_generation=4 + echo 'Waiting for sts/monitoring-haproxy to reach generation 4...' Waiting for sts/monitoring-haproxy to reach generation 4... + true ++ kubectl get sts/monitoring-haproxy -o 'jsonpath={.metadata.generation}' + current_generation=4 + '[' 4 -eq 4 ']' + echo 'Resource sts/monitoring-haproxy has reached generation 4.' 
Resource sts/monitoring-haproxy has reached generation 4. + break + sleep 10 + kubectl wait pod -l app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --for=condition=ready --timeout=600s pod/monitoring-haproxy-0 condition met pod/monitoring-haproxy-1 condition met pod/monitoring-pxc-0 condition met pod/monitoring-pxc-1 condition met pod/monitoring-pxc-2 condition met + desc 'check if pmm-client container enabled' + set +o xtrace ----------------------------------------------------------------------------------- check if pmm-client container enabled ----------------------------------------------------------------------------------- + compare_kubectl statefulset/monitoring-pxc + local resource=statefulset/monitoring-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc.yml + local new_result=/tmp/tmp.u1cffxxmJX/statefulset_monitoring-pxc.yml + desc 'compare statefulset/monitoring-pxc-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-pxc- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k129.yml ']' + version_gt 1.27 ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k127-oc.yml ']' + version_gt 1.29 ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-pxc ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") | del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. 
== "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-18368", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.y9Amlhzbn2 ++ mktemp + local LAST_ERR=/tmp/tmp.iMN2xkS27h + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.y9Amlhzbn2 + cat /tmp/tmp.iMN2xkS27h + rm /tmp/tmp.y9Amlhzbn2 /tmp/tmp.iMN2xkS27h + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-pxc-k127.yml /tmp/tmp.u1cffxxmJX/statefulset_monitoring-pxc.yml + log 'compare_kubectl: statefulset/monitoring-pxc OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2025-12-18T11:06:42+0000]' compare_kubectl: statefulset/monitoring-pxc OK [2025-12-18T11:06:42+0000] compare_kubectl: statefulset/monitoring-pxc OK + compare_kubectl statefulset/monitoring-haproxy + local resource=statefulset/monitoring-haproxy + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy.yml + local new_result=/tmp/tmp.u1cffxxmJX/statefulset_monitoring-haproxy.yml + desc 'compare statefulset/monitoring-haproxy-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/monitoring-haproxy- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.0 ]] + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-80.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 8\.4 ]] + version_gt 1.33 ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k129.yml ']' + version_gt 1.27 ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k127-oc.yml ']' + version_gt 1.29 ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/monitoring-haproxy ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.spec.template.spec.containers[].env[] | select(.name == "XTRABACKUP_ENABLED")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.metadata.annotations."kubernetes.digitalocean.com/load-balancer-id") | del(.metadata.annotations."service.beta.kubernetes.io/do-loadbalancer-type") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. 
| select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("monitoring-2-0-18368", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.mczBuCt4Kt ++ mktemp + local LAST_ERR=/tmp/tmp.fDHj6lxuzH + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/monitoring-haproxy + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.mczBuCt4Kt + cat /tmp/tmp.fDHj6lxuzH + rm /tmp/tmp.mczBuCt4Kt /tmp/tmp.fDHj6lxuzH + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/statefulset_monitoring-haproxy-k127.yml /tmp/tmp.u1cffxxmJX/statefulset_monitoring-haproxy.yml + log 'compare_kubectl: statefulset/monitoring-haproxy OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2025-12-18T11:06:43+0000]' compare_kubectl: statefulset/monitoring-haproxy OK [2025-12-18T11:06:43+0000] compare_kubectl: statefulset/monitoring-haproxy OK + desc 'verify clients agents statuses' + set +o xtrace ----------------------------------------------------------------------------------- verify clients agents statuses ----------------------------------------------------------------------------------- + sleep 300 ++ getSecretData my-cluster-secrets pmmserverkey ++ local secretName=my-cluster-secrets ++ local dataKey=pmmserverkey ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.pmmserverkey}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.W1Krow3McH +++ mktemp ++ local LAST_ERR=/tmp/tmp.r8JFZGJIOh ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.pmmserverkey}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.W1Krow3McH ++ cat /tmp/tmp.r8JFZGJIOh ++ rm /tmp/tmp.W1Krow3McH /tmp/tmp.r8JFZGJIOh ++ return 0 + API_KEY=eyJrIjoiYU9QRTJmMUhUM0JXQjBTYUhKWEQ0R21wOEtXbjUyM0IiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 +++ get_service_endpoint monitoring-service +++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.fZdV4rLLYB +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.nrjKFQABak ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ 
cat /tmp/tmp.fZdV4rLLYB ++++ cat /tmp/tmp.nrjKFQABak ++++ rm /tmp/tmp.fZdV4rLLYB /tmp/tmp.nrjKFQABak ++++ return 0 +++ local endpoint= +++ '[' -z '' ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.yKYIuPhpnr +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.oLHCnAj9Tv ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.yKYIuPhpnr ++++ cat /tmp/tmp.oLHCnAj9Tv ++++ rm /tmp/tmp.yKYIuPhpnr /tmp/tmp.oLHCnAj9Tv ++++ return 0 +++ endpoint=35.222.192.160 +++ '[' -n 35.222.192.160 ']' +++ '[' 35.222.192.160 '!=' null ']' +++ echo 35.222.192.160 +++ head -n 1 +++ sed -e 's/^"//; s/"$//;' +++ return ++ get_mgmnt_service_list eyJrIjoiYU9QRTJmMUhUM0JXQjBTYUhKWEQ0R21wOEtXbjUyM0IiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 35.222.192.160 monitoring-2-0-18368 ++ local api_key=eyJrIjoiYU9QRTJmMUhUM0JXQjBTYUhKWEQ0R21wOEtXbjUyM0IiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 ++ local endpoint=35.222.192.160 ++ local namespace=monitoring-2-0-18368 ++ curl -s -k -H 'Authorization: Bearer eyJrIjoiYU9QRTJmMUhUM0JXQjBTYUhKWEQ0R21wOEtXbjUyM0IiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9' -X POST https://35.222.192.160/v1/management/Service/List ++ jq 'walk(if type=="object" then with_entries(select(.key | test("service_id|node_id|agent_id|created_at|updated_at") | not)) else . end)' ++ jq 'walk(if type == "array" then sort_by(.agent_type) else . end)' ++ /usr/bin/sed -i s/monitoring-2-0-18368-//g /tmp/tmp.u1cffxxmJX/active_pmm_agents.json ++ cat /tmp/tmp.u1cffxxmJX/active_pmm_agents.json ++ jq '.services | sort_by(.node_name)' ++ echo /tmp/tmp.u1cffxxmJX/active_pmm_agents_sorted.json + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2323/e2e-tests/monitoring-2-0/compare/agents-list.json /tmp/tmp.u1cffxxmJX/active_pmm_agents_sorted.json + desc 'check mysql metrics' + set +o xtrace ----------------------------------------------------------------------------------- check mysql metrics ----------------------------------------------------------------------------------- + get_metric_values node_boot_time_seconds pxc-prefix-monitoring-2-0-18368-monitoring-pxc-0 admin:admin + local metric=node_boot_time_seconds + local instance=pxc-prefix-monitoring-2-0-18368-monitoring-pxc-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1766056246 ++ /usr/bin/date -u +%s + local end=1766056306 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.aJ7Pjw00sc ++++ mktemp +++ local LAST_ERR=/tmp/tmp.d68ciNdN7F +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.aJ7Pjw00sc +++ cat /tmp/tmp.d68ciNdN7F +++ rm /tmp/tmp.aJ7Pjw00sc /tmp/tmp.d68ciNdN7F +++ return 0 ++ local endpoint= ++ '[' -z '' ']' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.m27GlZRo3m ++++ mktemp +++ local LAST_ERR=/tmp/tmp.HYO7et2c0X +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get 
service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.m27GlZRo3m +++ cat /tmp/tmp.HYO7et2c0X +++ rm /tmp/tmp.m27GlZRo3m /tmp/tmp.HYO7et2c0X +++ return 0 ++ endpoint=35.222.192.160 ++ '[' -n 35.222.192.160 ']' ++ '[' 35.222.192.160 '!=' null ']' ++ echo 35.222.192.160 ++ sed -e 's/^"//; s/"$//;' ++ head -n 1 ++ return + local endpoint=35.222.192.160 ++ jq '.data.result[0]' ++ curl -s -k 'https://admin:admin@35.222.192.160/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-18368-monitoring-pxc-0%22%7d%20or%20node_boot_time_seconds%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-18368-monitoring-pxc-0%22%7D%29&start=1766056246&end=1766056306&step=60' + local 'result={ "metric": {}, "values": [ [ 1766056246, "1766050841" ], [ 1766056306, "1766050841" ] ] }' + '[' '{ "metric": {}, "values": [ [ 1766056246, "1766050841" ], [ 1766056306, "1766050841" ] ] }' = null ']' + jq '.values[][1]' + grep '^"[0-9]' + echo -n '{ "metric": {}, "values": [ [ 1766056246, "1766050841" ], [ 1766056306, "1766050841" ] ] }' "1766050841" "1766050841" + get_metric_values mysql_global_status_uptime pxc-prefix-monitoring-2-0-18368-monitoring-pxc-0 admin:admin + local metric=mysql_global_status_uptime + local instance=pxc-prefix-monitoring-2-0-18368-monitoring-pxc-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1766056248 ++ /usr/bin/date -u +%s + local end=1766056308 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.83gfebyZwd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.dkNrpC4nee +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.83gfebyZwd +++ cat /tmp/tmp.dkNrpC4nee +++ rm /tmp/tmp.83gfebyZwd /tmp/tmp.dkNrpC4nee +++ return 0 ++ local endpoint= ++ '[' -z '' ']' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.JIbk04D72T ++++ mktemp +++ local LAST_ERR=/tmp/tmp.DkEOTl0e1v +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.JIbk04D72T +++ cat /tmp/tmp.DkEOTl0e1v +++ rm /tmp/tmp.JIbk04D72T /tmp/tmp.DkEOTl0e1v +++ return 0 ++ endpoint=35.222.192.160 ++ '[' -n 35.222.192.160 ']' ++ '[' 35.222.192.160 '!=' null ']' ++ echo 35.222.192.160 ++ head -n 1 ++ sed -e 's/^"//; s/"$//;' ++ return + local endpoint=35.222.192.160 ++ jq '.data.result[0]' ++ curl -s -k 'https://admin:admin@35.222.192.160/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-18368-monitoring-pxc-0%22%7d%20or%20mysql_global_status_uptime%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-18368-monitoring-pxc-0%22%7D%29&start=1766056248&end=1766056308&step=60' + local 'result={ "metric": {}, "values": [ [ 1766056248, "164" ], [ 1766056308, "224" ] ] }' + '[' '{ "metric": {}, "values": [ [ 1766056248, "164" ], [ 1766056308, "224" ] ] }' = 
null ']' + echo -n '{ "metric": {}, "values": [ [ 1766056248, "164" ], [ 1766056308, "224" ] ] }' + jq '.values[][1]' + grep '^"[0-9]' "164" "224" + desc 'check haproxy metrics' + set +o xtrace ----------------------------------------------------------------------------------- check haproxy metrics ----------------------------------------------------------------------------------- + get_metric_values haproxy_backend_status pxc-prefix-monitoring-2-0-18368-monitoring-haproxy-0 admin:admin + local metric=haproxy_backend_status + local instance=pxc-prefix-monitoring-2-0-18368-monitoring-haproxy-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1766056251 ++ /usr/bin/date -u +%s + local end=1766056311 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ERJGb2AdLv ++++ mktemp +++ local LAST_ERR=/tmp/tmp.Dfxo3BX2MB +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.ERJGb2AdLv +++ cat /tmp/tmp.Dfxo3BX2MB +++ rm /tmp/tmp.ERJGb2AdLv /tmp/tmp.Dfxo3BX2MB +++ return 0 ++ local endpoint= ++ '[' -z '' ']' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.1enPuc4JaM ++++ mktemp +++ local LAST_ERR=/tmp/tmp.fDwEcQaWrz +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.1enPuc4JaM +++ cat /tmp/tmp.fDwEcQaWrz +++ rm /tmp/tmp.1enPuc4JaM /tmp/tmp.fDwEcQaWrz +++ return 0 ++ endpoint=35.222.192.160 ++ '[' -n 35.222.192.160 ']' ++ '[' 35.222.192.160 '!=' null ']' ++ head -n 1 ++ sed -e 's/^"//; s/"$//;' ++ echo 35.222.192.160 ++ return + local endpoint=35.222.192.160 ++ curl -s -k 'https://admin:admin@35.222.192.160/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28haproxy_backend_status%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-18368-monitoring-haproxy-0%22%7d%20or%20haproxy_backend_status%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-18368-monitoring-haproxy-0%22%7D%29&start=1766056251&end=1766056311&step=60' ++ jq '.data.result[0]' + local 'result={ "metric": {}, "values": [ [ 1766056251, "0" ], [ 1766056311, "0" ] ] }' + '[' '{ "metric": {}, "values": [ [ 1766056251, "0" ], [ 1766056311, "0" ] ] }' = null ']' + echo -n '{ "metric": {}, "values": [ [ 1766056251, "0" ], [ 1766056311, "0" ] ] + jq '.values[][1]' }' + grep '^"[0-9]' "0" "0" + get_metric_values haproxy_backend_active_servers pxc-prefix-monitoring-2-0-18368-monitoring-haproxy-0 admin:admin + local metric=haproxy_backend_active_servers + local instance=pxc-prefix-monitoring-2-0-18368-monitoring-haproxy-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%s -d '-1 minute' + local start=1766056254 ++ /usr/bin/date -u +%s + local end=1766056314 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.aH2vt4aDwx ++++ mktemp +++ local LAST_ERR=/tmp/tmp.9YwirkDNdy +++ local exit_status=0 ++++ seq 0 2 +++ for 
i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.aH2vt4aDwx +++ cat /tmp/tmp.9YwirkDNdy +++ rm /tmp/tmp.aH2vt4aDwx /tmp/tmp.9YwirkDNdy +++ return 0 ++ local endpoint= ++ '[' -z '' ']' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.NKwK7Sylho ++++ mktemp +++ local LAST_ERR=/tmp/tmp.MB4NfXYsv0 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.NKwK7Sylho +++ cat /tmp/tmp.MB4NfXYsv0 +++ rm /tmp/tmp.NKwK7Sylho /tmp/tmp.MB4NfXYsv0 +++ return 0 ++ endpoint=35.222.192.160 ++ '[' -n 35.222.192.160 ']' ++ '[' 35.222.192.160 '!=' null ']' ++ echo 35.222.192.160 ++ head -n 1 ++ sed -e 's/^"//; s/"$//;' ++ return + local endpoint=35.222.192.160 ++ jq '.data.result[0]' ++ curl -s -k 'https://admin:admin@35.222.192.160/graph/api/datasources/proxy/1/api/v1/query_range?query=min%28haproxy_backend_active_servers%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-18368-monitoring-haproxy-0%22%7d%20or%20haproxy_backend_active_servers%7Bnode_name%3D%7E%22pxc-prefix-monitoring-2-0-18368-monitoring-haproxy-0%22%7D%29&start=1766056254&end=1766056314&step=60' + local 'result={ "metric": {}, "values": [ [ 1766056254, "1" ], [ 1766056314, "1" ] ] }' + '[' '{ "metric": {}, "values": [ [ 1766056254, "1" ], [ 1766056314, "1" ] ] }' = null ']' + jq '.values[][1]' + grep '^"[0-9]' + echo -n '{ "metric": {}, "values": [ [ 1766056254, "1" ], [ 1766056314, "1" ] ] }' "1" "1" + desc 'check QAN data' + set +o xtrace ----------------------------------------------------------------------------------- check QAN data ----------------------------------------------------------------------------------- + get_qan20_values monitoring-pxc-0 admin:admin + local instance=monitoring-pxc-0 + local user_pass=admin:admin ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S -d '-30 minute' + local start=2025-12-18T10:41:57 ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S + local end=2025-12-18T11:11:57 ++ get_service_endpoint monitoring-service ++ local service=monitoring-service +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.NkiLBQTlzE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.WkkM6jdYGe +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.NkiLBQTlzE +++ cat /tmp/tmp.WkkM6jdYGe +++ rm /tmp/tmp.NkiLBQTlzE /tmp/tmp.WkkM6jdYGe +++ return 0 ++ local endpoint= ++ '[' -z '' ']' +++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.zZS7Tiy3Xc ++++ mktemp +++ local LAST_ERR=/tmp/tmp.o0noGbBMX7 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.zZS7Tiy3Xc +++ cat /tmp/tmp.o0noGbBMX7 +++ rm /tmp/tmp.zZS7Tiy3Xc /tmp/tmp.o0noGbBMX7 +++ return 0 ++ endpoint=35.222.192.160 ++ '[' -n 
35.222.192.160 ']' ++ '[' 35.222.192.160 '!=' null ']' ++ head -n 1 ++ sed -e 's/^"//; s/"$//;' ++ echo 35.222.192.160 ++ return + local endpoint=35.222.192.160 + cat ++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z + curl -s -k -XPOST -d @payload.json https://admin:admin@35.222.192.160/v0/qan/GetReport + jq '.rows[].fingerprint' null + rm -f payload.json + nodeList=($(get_node_id_from_pmm)) ++ get_node_id_from_pmm ++ nodeList=() ++ local -a nodeList +++ kubectl_bin get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns=NAME:.metadata.name ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ej3bcWVFYL ++++ mktemp +++ local LAST_ERR=/tmp/tmp.8mJdFYcPUo +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns=NAME:.metadata.name +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.ej3bcWVFYL +++ cat /tmp/tmp.8mJdFYcPUo +++ rm /tmp/tmp.ej3bcWVFYL /tmp/tmp.8mJdFYcPUo +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ jq -r .pmm_agent_status.node_id +++ kubectl_bin exec -n monitoring-2-0-18368 monitoring-pxc-0 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.cKUhPqJOpn ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yWglcWlUPc +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-18368 monitoring-pxc-0 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.cKUhPqJOpn +++ cat /tmp/tmp.yWglcWlUPc +++ rm /tmp/tmp.cKUhPqJOpn /tmp/tmp.yWglcWlUPc +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ kubectl_bin exec -n monitoring-2-0-18368 monitoring-pxc-1 -c pmm-client -- pmm-admin status --json +++ jq -r .pmm_agent_status.node_id ++++ mktemp +++ local LAST_OUT=/tmp/tmp.oAGPSqqVVS ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QLrN4AJoAN +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-18368 monitoring-pxc-1 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.oAGPSqqVVS +++ cat /tmp/tmp.QLrN4AJoAN +++ rm /tmp/tmp.oAGPSqqVVS /tmp/tmp.QLrN4AJoAN +++ return 0 ++ for instance in '$(kubectl_bin get pods --no-headers -l app.kubernetes.io/component=pxc --output=custom-columns='\''NAME:.metadata.name'\'')' ++ nodeList+=($(kubectl_bin exec -n "$namespace" $instance -c pmm-client -- pmm-admin status --json | jq -r '.pmm_agent_status.node_id')) +++ jq -r .pmm_agent_status.node_id +++ kubectl_bin exec -n monitoring-2-0-18368 monitoring-pxc-2 -c pmm-client -- pmm-admin status --json ++++ mktemp +++ local LAST_OUT=/tmp/tmp.gtfyJl5oXj ++++ mktemp +++ local LAST_ERR=/tmp/tmp.qkYaTGmbq8 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-18368 monitoring-pxc-2 -c pmm-client -- pmm-admin status --json +++ exit_status=0 +++ set 
-e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.gtfyJl5oXj +++ cat /tmp/tmp.qkYaTGmbq8 +++ rm /tmp/tmp.gtfyJl5oXj /tmp/tmp.qkYaTGmbq8 +++ return 0 ++ echo /node_id/0d44046e-2b53-4359-9cf9-05bacef9dada /node_id/8db30645-bbe7-4d9d-aa50-3a282828cfbb /node_id/5fcfc167-90d1-43ed-9825-2ad14ac8b833 + nodeList_from_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/0d44046e-2b53-4359-9cf9-05bacef9dada /node_id/8db30645-bbe7-4d9d-aa50-3a282828cfbb /node_id/5fcfc167-90d1-43ed-9825-2ad14ac8b833 ++ nodeList=("$@") ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service +++ grep /node_id/0d44046e-2b53-4359-9cf9-05bacef9dada ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.vzI3dASE26 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.iRPumc6280 +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.vzI3dASE26 +++++ cat /tmp/tmp.iRPumc6280 +++++ rm /tmp/tmp.vzI3dASE26 /tmp/tmp.iRPumc6280 +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.kmqaxOpoyB +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.DuO6TPjiJC ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.kmqaxOpoyB ++++ cat /tmp/tmp.DuO6TPjiJC ++++ rm /tmp/tmp.kmqaxOpoyB /tmp/tmp.DuO6TPjiJC ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.9cB7CQBWp8 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.8fil7Pv9lz ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.9cB7CQBWp8 ++++ cat /tmp/tmp.8fil7Pv9lz ++++ rm /tmp/tmp.9cB7CQBWp8 /tmp/tmp.8fil7Pv9lz ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-18368 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.222.192.160/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.Cu3xttTC3K ++++ mktemp +++ local LAST_ERR=/tmp/tmp.xaoLZFJprK +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-18368 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.222.192.160/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ 
exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.Cu3xttTC3K +++ cat /tmp/tmp.xaoLZFJprK +++ rm /tmp/tmp.Cu3xttTC3K /tmp/tmp.xaoLZFJprK +++ return 0 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ awk '{print $4}' +++ grep /node_id/8db30645-bbe7-4d9d-aa50-3a282828cfbb ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ grep -q NotFound ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.uAoGMYGDex ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.U70Ow3idUW +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.uAoGMYGDex +++++ cat /tmp/tmp.U70Ow3idUW +++++ rm /tmp/tmp.uAoGMYGDex /tmp/tmp.U70Ow3idUW +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.9qXa6gUyw1 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.sK2SUZgCyu ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.9qXa6gUyw1 ++++ cat /tmp/tmp.sK2SUZgCyu ++++ rm /tmp/tmp.9qXa6gUyw1 /tmp/tmp.sK2SUZgCyu ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.evFDWleplZ +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.3ANxzVXag6 ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.evFDWleplZ ++++ cat /tmp/tmp.3ANxzVXag6 ++++ rm /tmp/tmp.evFDWleplZ /tmp/tmp.3ANxzVXag6 ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-18368 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.222.192.160/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.bN9AloPD6W ++++ mktemp +++ local LAST_ERR=/tmp/tmp.q1kdNTT6Ma +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-18368 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.222.192.160/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.bN9AloPD6W +++ cat /tmp/tmp.q1kdNTT6Ma +++ rm /tmp/tmp.bN9AloPD6W /tmp/tmp.q1kdNTT6Ma +++ return 0 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep 
$node_id | awk '{print $4}')) +++ grep /node_id/5fcfc167-90d1-43ed-9825-2ad14ac8b833 +++ awk '{print $4}' ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.K5A8eayUUU ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.EexaxEmaUR +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.K5A8eayUUU +++++ cat /tmp/tmp.EexaxEmaUR +++++ rm /tmp/tmp.K5A8eayUUU /tmp/tmp.EexaxEmaUR +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ahZyXnjSEB +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.jlRJ4TpWKx ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.ahZyXnjSEB ++++ cat /tmp/tmp.jlRJ4TpWKx ++++ rm /tmp/tmp.ahZyXnjSEB /tmp/tmp.jlRJ4TpWKx ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.2IPmqgRe54 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.UUkmDnMlKg ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.2IPmqgRe54 ++++ cat /tmp/tmp.UUkmDnMlKg ++++ rm /tmp/tmp.2IPmqgRe54 /tmp/tmp.UUkmDnMlKg ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-18368 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.222.192.160/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.jpH37Xv04I ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1xQYCTJT8Z +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-18368 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.222.192.160/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.jpH37Xv04I +++ cat /tmp/tmp.1xQYCTJT8Z +++ rm /tmp/tmp.jpH37Xv04I /tmp/tmp.1xQYCTJT8Z +++ return 0 ++ echo /node_id/0d44046e-2b53-4359-9cf9-05bacef9dada /node_id/8db30645-bbe7-4d9d-aa50-3a282828cfbb /node_id/5fcfc167-90d1-43ed-9825-2ad14ac8b833 + for node_id in '"${nodeList_from_pmm[@]}"' + '[' -z /node_id/0d44046e-2b53-4359-9cf9-05bacef9dada ']' + for node_id in '"${nodeList_from_pmm[@]}"' + '[' -z /node_id/8db30645-bbe7-4d9d-aa50-3a282828cfbb ']' + for node_id in '"${nodeList_from_pmm[@]}"' + '[' -z /node_id/5fcfc167-90d1-43ed-9825-2ad14ac8b833 ']' + kubectl_bin patch pxc monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' ++ mktemp + local LAST_OUT=/tmp/tmp.9uGWNAllP0 ++ mktemp + local LAST_ERR=/tmp/tmp.Dp4huzfBOH + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e 
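At this point the trace has gathered each PXC pod's pmm-agent node ID and confirmed that every ID is registered in PMM's inventory; the steps that follow pause the cluster and repeat the lookup. A minimal sketch of that check, assembled only from commands visible in the trace (pmm-admin status --json in the pmm-client container, pmm-admin inventory list nodes on the monitoring-0 server pod) with the namespace and admin:admin credentials shown above; the loop structure and variable names are editorial, not the test suite's own helpers, and the endpoint lookup is reduced to the LoadBalancer-IP case seen in the trace:

    # Sketch: confirm each pod's pmm-agent node_id is registered in PMM's inventory.
    namespace=monitoring-2-0-18368
    endpoint=$(kubectl get service/monitoring-service -n "$namespace" \
        -o 'jsonpath={.status.loadBalancer.ingress[].ip}')
    for pod in $(kubectl get pods -n "$namespace" --no-headers \
        -l app.kubernetes.io/component=pxc -o custom-columns=NAME:.metadata.name); do
        node_id=$(kubectl exec -n "$namespace" "$pod" -c pmm-client -- \
            pmm-admin status --json | jq -r '.pmm_agent_status.node_id')
        kubectl exec -n "$namespace" monitoring-0 -- pmm-admin \
            --server-url="https://admin:admin@${endpoint}/" --server-insecure-tls \
            inventory list nodes --node-type=CONTAINER_NODE \
            | grep -q "$node_id" || echo "node $node_id missing from PMM inventory"
    done
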
+ kubectl patch pxc monitoring --type json '-p=[{"op":"add","path":"/spec/pause","value":true}]' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.9uGWNAllP0 perconaxtradbcluster.pxc.percona.com/monitoring patched + cat /tmp/tmp.Dp4huzfBOH + rm /tmp/tmp.9uGWNAllP0 /tmp/tmp.Dp4huzfBOH + return 0 + wait_for_delete pod/monitoring-pxc-0 + local res=pod/monitoring-pxc-0 + echo -n 'waiting for pod/monitoring-pxc-0 to be deleted' waiting for pod/monitoring-pxc-0 to be deleted+ set +o xtrace .................Error from server (NotFound): pods "monitoring-pxc-0" not found + does_node_id_exists_in_pmm=($(does_node_id_exists "${nodeList[@]}")) ++ does_node_id_exists /node_id/0d44046e-2b53-4359-9cf9-05bacef9dada /node_id/8db30645-bbe7-4d9d-aa50-3a282828cfbb /node_id/5fcfc167-90d1-43ed-9825-2ad14ac8b833 ++ nodeList=("$@") ++ local -a nodeList ++ nodeList_from_pmm=() ++ local -a nodeList_from_pmm ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/0d44046e-2b53-4359-9cf9-05bacef9dada ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' +++ awk '{print $4}' ++++ grep -q NotFound +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.bhYrW8Tfa4 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.pwfso6Dv8T +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.bhYrW8Tfa4 +++++ cat /tmp/tmp.pwfso6Dv8T +++++ rm /tmp/tmp.bhYrW8Tfa4 /tmp/tmp.pwfso6Dv8T +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.F8mTxV243d +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.uxG1n9e4s3 ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.F8mTxV243d ++++ cat /tmp/tmp.uxG1n9e4s3 ++++ rm /tmp/tmp.F8mTxV243d /tmp/tmp.uxG1n9e4s3 ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.ODP2K0YTsK +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.uPEDWHLrMe ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.ODP2K0YTsK ++++ cat /tmp/tmp.uPEDWHLrMe ++++ rm /tmp/tmp.ODP2K0YTsK /tmp/tmp.uPEDWHLrMe ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-18368 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.222.192.160/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TLJl55cZmS ++++ mktemp +++ local 
LAST_ERR=/tmp/tmp.iW8GLqRrq3 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-18368 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.222.192.160/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.TLJl55cZmS +++ cat /tmp/tmp.iW8GLqRrq3 +++ rm /tmp/tmp.TLJl55cZmS /tmp/tmp.iW8GLqRrq3 +++ return 0 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/8db30645-bbe7-4d9d-aa50-3a282828cfbb ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++ awk '{print $4}' +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.UnfSBqpr5q ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.MSYc8an2ST +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.UnfSBqpr5q +++++ cat /tmp/tmp.MSYc8an2ST +++++ rm /tmp/tmp.UnfSBqpr5q /tmp/tmp.MSYc8an2ST +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.dQ0m6fwZ25 +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.GbrSpu4ynN ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.dQ0m6fwZ25 ++++ cat /tmp/tmp.GbrSpu4ynN ++++ rm /tmp/tmp.dQ0m6fwZ25 /tmp/tmp.GbrSpu4ynN ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.mU5YoU1knd +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.P9okwfmcQM ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.mU5YoU1knd ++++ cat /tmp/tmp.P9okwfmcQM ++++ rm /tmp/tmp.mU5YoU1knd /tmp/tmp.P9okwfmcQM ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-18368 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.222.192.160/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.TBN0CVUq2O ++++ mktemp +++ local LAST_ERR=/tmp/tmp.1iOwVc46yo +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-18368 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.222.192.160/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.TBN0CVUq2O +++ cat /tmp/tmp.1iOwVc46yo +++ rm /tmp/tmp.TBN0CVUq2O 
/tmp/tmp.1iOwVc46yo +++ return 0 ++ for node_id in '"${nodeList[@]}"' ++ nodeList_from_pmm+=($(kubectl_bin exec -n "${namespace}" monitoring-0 -- pmm-admin --server-url=https://admin:admin@$(get_service_ip monitoring-service)/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE | grep $node_id | awk '{print $4}')) +++ grep /node_id/5fcfc167-90d1-43ed-9825-2ad14ac8b833 ++++ get_service_ip monitoring-service ++++ local service=monitoring-service ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++ grep -q NotFound +++ awk '{print $4}' +++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.spec.type}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.qdy4faVXb7 ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.WHPdSo7AC9 +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get service/monitoring-service -o 'jsonpath={.spec.type}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.qdy4faVXb7 +++++ cat /tmp/tmp.WHPdSo7AC9 +++++ rm /tmp/tmp.qdy4faVXb7 /tmp/tmp.WHPdSo7AC9 +++++ return 0 ++++ '[' LoadBalancer = ClusterIP ']' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[]}' ++++ grep -E -q 'hostname|ip' ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.eRoCMWFNwY +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.y0Exo3SRnT ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].ip}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.eRoCMWFNwY ++++ cat /tmp/tmp.y0Exo3SRnT ++++ rm /tmp/tmp.eRoCMWFNwY /tmp/tmp.y0Exo3SRnT ++++ return 0 ++++ kubectl_bin get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' +++++ mktemp ++++ local LAST_OUT=/tmp/tmp.zhr6JrAIfp +++++ mktemp ++++ local LAST_ERR=/tmp/tmp.YjdZVEF81h ++++ local exit_status=0 +++++ seq 0 2 ++++ for i in '$(seq 0 2)' ++++ set +e ++++ kubectl get service/monitoring-service -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' ++++ exit_status=0 ++++ set -e ++++ '[' 0 '!=' 0 ']' ++++ break ++++ cat /tmp/tmp.zhr6JrAIfp ++++ cat /tmp/tmp.YjdZVEF81h ++++ rm /tmp/tmp.zhr6JrAIfp /tmp/tmp.YjdZVEF81h ++++ return 0 +++ kubectl_bin exec -n monitoring-2-0-18368 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.222.192.160/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE ++++ mktemp +++ local LAST_OUT=/tmp/tmp.0q2PJMLeim ++++ mktemp +++ local LAST_ERR=/tmp/tmp.rEnHFh6Klz +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl exec -n monitoring-2-0-18368 monitoring-0 -- pmm-admin --server-url=https://admin:admin@35.222.192.160/ --server-insecure-tls inventory list nodes --node-type=CONTAINER_NODE +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.0q2PJMLeim +++ cat /tmp/tmp.rEnHFh6Klz +++ rm /tmp/tmp.0q2PJMLeim /tmp/tmp.rEnHFh6Klz +++ return 0 ++ echo + [[ -n '' ]] + helm uninstall monitoring release "monitoring" uninstalled + destroy monitoring-2-0-18368 + local namespace=monitoring-2-0-18368 + local ignore_logs=true + [[ 0 == 1 ]] + desc 'destroy cluster/operator and all other resources' + set +o xtrace ----------------------------------------------------------------------------------- destroy cluster/operator and all other resources 
----------------------------------------------------------------------------------- + '[' true == false -o 1 == 1 ']' + grep -v level=info + grep -v 'the object has been modified' + tee /tmp/tmp.u1cffxxmJX/operator.log + grep -v 'get backup status: Job.batch' + /usr/bin/sed -r 's/"ts":[0-9.]+//; s^limits-[0-9.]+/^^g' + sort -u ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ jq -r '.items[] | select(.metadata.deletionTimestamp == null) | .metadata.name' ++ head -1 ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.jta6TLETSh +++ mktemp ++ local LAST_ERR=/tmp/tmp.bqELRPBTFN ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator --field-selector=status.phase=Running -o json -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.jta6TLETSh ++ cat /tmp/tmp.bqELRPBTFN ++ rm /tmp/tmp.jta6TLETSh /tmp/tmp.bqELRPBTFN ++ return 0 + kubectl_bin logs -n pxc-operator percona-xtradb-cluster-operator-5449f85878-ljgfx ++ mktemp + local LAST_OUT=/tmp/tmp.iaU66KLojl ++ mktemp + local LAST_ERR=/tmp/tmp.wMccKVuPxm + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs -n pxc-operator percona-xtradb-cluster-operator-5449f85878-ljgfx + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.iaU66KLojl + cat /tmp/tmp.wMccKVuPxm + rm /tmp/tmp.iaU66KLojl /tmp/tmp.wMccKVuPxm + return 0 } }, }, { }, }, { }, }, { }, }, }, - }, - { - }, + }, ... // 11 identical elements ... // 12 identical elements ... // 16 identical fields ... // 16 identical fields 2025-12-18T10:52:11.821Z INFO setup Manager starting up {"gitCommit": "64f0860fba86079c1820576f93ff6c6bdd547dbb", "gitBranch": "PR-2323-64f0860f", "buildTime": "2025-12-18T09:33:12Z", "goVersion": "go1.25.5", "os": "linux", "arch": "amd64"} 2025-12-18T10:52:11.821Z INFO setup Runs on {"platform": "kubernetes", "version": "v1.31.14-gke.1081000"} 2025-12-18T10:52:11.822Z INFO setup Feature gates {"PXCO_FEATURE_GATES": "", "enabled": ""} 2025-12-18T10:52:11.825Z INFO setup Registering Components. 2025-12-18T10:52:12.000Z INFO controller-runtime.certwatcher Updated current TLS certificate {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key"} 2025-12-18T10:52:12.000Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":8080", "secure": false} 2025-12-18T10:52:12.000Z INFO controller-runtime.metrics Starting metrics server 2025-12-18T10:52:12.000Z INFO controller-runtime.webhook Registering webhook {"path": "/validate-percona-xtradbcluster"} 2025-12-18T10:52:12.000Z INFO controller-runtime.webhook Serving webhook server {"host": "", "port": 9443} 2025-12-18T10:52:12.000Z INFO controller-runtime.webhook Starting webhook server 2025-12-18T10:52:12.000Z INFO setup Starting the Cmd. 
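A note on the operator-log dump that continues below: the test pipes kubectl logs through the filters traced just above (several grep -v filters, a sed that drops "ts" fields and limits- path prefixes, then sort -u), so what appear to be go-cmp object diffs from the operator's DEBUG output are de-duplicated and re-ordered alphabetically; that is why fragments such as "... // 11 identical elements" and stray "+"/"-" field lines surround the timestamped entries rather than forming contiguous diffs. To read the events in plain chronological order, one could instead pull the log directly, using the selector and namespace shown in the trace:

    # Sketch: fetch the operator log unfiltered and in chronological order.
    kubectl logs -n pxc-operator \
        -l app.kubernetes.io/name=percona-xtradb-cluster-operator --tail=-1
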
2025-12-18T10:52:12.000Z INFO starting server {"name": "health probe", "addr": "[::]:8081"} 2025-12-18T10:52:12.001Z INFO controller-runtime.certwatcher Starting certificate poll+watcher {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key", "interval": "10s"} 2025-12-18T10:52:12.101Z INFO Attempting to acquire leader lease... {"lock": "pxc-operator/08db1feb.percona.com"} 2025-12-18T10:52:12.131Z INFO Starting EventSource {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "source": "kind source: *v1.Secret"} 2025-12-18T10:52:12.131Z INFO Successfully acquired lease {"lock": "pxc-operator/08db1feb.percona.com"} 2025-12-18T10:52:12.132Z DEBUG events percona-xtradb-cluster-operator-5449f85878-ljgfx_6fc19f7c-12e7-4afa-851b-632669120335 became leader {"type": "Normal", "object": {"kind":"Lease","namespace":"pxc-operator","name":"08db1feb.percona.com","uid":"668045d2-4d00-4cae-9a43-5b809efdf914","apiVersion":"coordination.k8s.io/v1","resourceVersion":"1766055132124063009"}, "reason": "LeaderElection"} 2025-12-18T10:52:12.132Z INFO Starting EventSource {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "source": "kind source: *v1.PerconaXtraDBClusterBackup"} 2025-12-18T10:52:12.132Z INFO Starting EventSource {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "source": "kind source: *v1.PerconaXtraDBCluster"} 2025-12-18T10:52:12.132Z INFO Starting EventSource {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "source": "kind source: *v1.PerconaXtraDBClusterRestore"} 2025-12-18T10:52:12.233Z INFO Starting Controller {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup"} 2025-12-18T10:52:12.233Z INFO Starting Controller {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster"} 2025-12-18T10:52:12.233Z INFO Starting Controller {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore"} 2025-12-18T10:52:12.233Z INFO Starting workers {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "worker count": 1} 2025-12-18T10:52:12.233Z INFO Starting workers {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "worker count": 1} 2025-12-18T10:52:12.233Z INFO Starting workers {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "worker count": 1} 2025-12-18T10:53:51.700Z INFO Set CR version {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "5c7ab73e-7dd1-4fd0-afee-27fb9ca5e3e9", "version": "1.19.0"} 2025-12-18T10:53:52.972Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", 
"name": "monitoring", "reconcileID": "5c7ab73e-7dd1-4fd0-afee-27fb9ca5e3e9", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"} 2025-12-18T10:53:53.036Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "5c7ab73e-7dd1-4fd0-afee-27fb9ca5e3e9", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"} 2025-12-18T10:53:53.122Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "5c7ab73e-7dd1-4fd0-afee-27fb9ca5e3e9", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2025-12-18T10:53:53.188Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "5c7ab73e-7dd1-4fd0-afee-27fb9ca5e3e9", "object": "monitoring-pxc-unready", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2025-12-18T10:53:53.287Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "5c7ab73e-7dd1-4fd0-afee-27fb9ca5e3e9", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2025-12-18T10:53:53.462Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "5c7ab73e-7dd1-4fd0-afee-27fb9ca5e3e9", "object": "monitoring-haproxy-replicas", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2025-12-18T10:53:54.420Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "f3f99513-773d-4290-8764-22288338288c", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2025-12-18T10:53:54.437Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "f3f99513-773d-4290-8764-22288338288c", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2025-12-18T10:55:10.454Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": 
"monitoring-2-0-18368", "name": "monitoring", "reconcileID": "933a5fff-89d8-4098-a3bf-c63916b7fc03", "user": "operator"} 2025-12-18T10:55:10.495Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "933a5fff-89d8-4098-a3bf-c63916b7fc03", "user": "monitor"} 2025-12-18T10:55:10.568Z INFO User monitor: granted privileges {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "933a5fff-89d8-4098-a3bf-c63916b7fc03"} 2025-12-18T10:55:10.604Z INFO monitor user privileges granted {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "933a5fff-89d8-4098-a3bf-c63916b7fc03"} 2025-12-18T10:55:10.645Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "933a5fff-89d8-4098-a3bf-c63916b7fc03", "user": "xtrabackup"} 2025-12-18T10:55:10.700Z INFO User xtrabackup: granted privileges {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "933a5fff-89d8-4098-a3bf-c63916b7fc03"} 2025-12-18T10:55:10.741Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "933a5fff-89d8-4098-a3bf-c63916b7fc03", "user": "replication"} 2025-12-18T10:57:41.077Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "40cee983-7f43-4aec-b5d4-c21147c37dc6", "user": "root"} 2025-12-18T10:57:41.144Z INFO update PXC version (fetched from db) {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "40cee983-7f43-4aec-b5d4-c21147c37dc6", "new version": "8.0.43-34.1"} 2025-12-18T11:00:38.294Z INFO Internal secrets updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": 
"77a051e6-13e1-4e7b-878f-acc93e53fa03", "user": "pmmserverkey"} 2025-12-18T11:00:40.937Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "ae92cb73-fcd7-48ad-9ad4-4a4ac6729cc8", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-12-18T11:00:41.030Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "ae92cb73-fcd7-48ad-9ad4-4a4ac6729cc8", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-12-18T11:00:41.192Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "ae92cb73-fcd7-48ad-9ad4-4a4ac6729cc8", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-12-18T11:01:39.491Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "29026fd2-cac2-48bc-b6bc-7e12b6f28a03", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-1: dial tcp 10.171.74.40:33062: connect: connection refused"} 2025-12-18T11:01:39.709Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "b84fef97-c0a5-44eb-b811-9c261489ddbe", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-1: dial tcp 10.171.74.40:33062: connect: connection refused"} 2025-12-18T11:02:26.113Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "39d633d9-09b4-4a04-bff1-74503c901ab0", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-0: dial tcp: lookup monitoring-pxc-0.monitoring-pxc.monitoring-2-0-18368 on 34.118.224.10:53: no such host"} 2025-12-18T11:03:13.258Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "fa0a532e-9e62-45f3-b65e-40727c94dac0", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", 
"hashChanged": true, "metaChanged": true} 2025-12-18T11:03:13.344Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "fa0a532e-9e62-45f3-b65e-40727c94dac0", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-12-18T11:04:16.078Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "d70363d8-3064-4f0a-bfe0-53c2f7838b6c", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-1: dial tcp 10.171.74.41:33062: connect: connection refused"} 2025-12-18T11:05:09.345Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "7088afd4-0b59-41e2-8188-5a318c97d211", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-0: dial tcp: lookup monitoring-pxc-0.monitoring-pxc.monitoring-2-0-18368 on 34.118.224.10:53: no such host"} 2025-12-18T11:05:56.442Z INFO Password changed, updating user {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "39db689f-8e47-4cfa-8372-80bd26a32982", "user": "pmmserverkey"} 2025-12-18T11:05:56.462Z INFO HAProxy pods will be restarted {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "39db689f-8e47-4cfa-8372-80bd26a32982", "last-applied-secret": "90f1daf8fbfcd28aec8f1038d82b4740bfb06d8d87a1382aba0e68ab5303cc0d"} 2025-12-18T11:05:56.462Z INFO Internal secrets updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "39db689f-8e47-4cfa-8372-80bd26a32982", "user": "pmmserverkey"} 2025-12-18T11:05:56.462Z INFO PXC pods will be restarted {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "39db689f-8e47-4cfa-8372-80bd26a32982", "last-applied-secret": "90f1daf8fbfcd28aec8f1038d82b4740bfb06d8d87a1382aba0e68ab5303cc0d"} 2025-12-18T11:05:56.467Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": 
"monitoring-2-0-18368", "name": "monitoring", "reconcileID": "39db689f-8e47-4cfa-8372-80bd26a32982", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-12-18T11:05:56.543Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "39db689f-8e47-4cfa-8372-80bd26a32982", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-12-18T11:06:53.389Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "597421f2-85b9-4346-9754-dfb87053cd86", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-1: dial tcp 10.171.74.42:33062: connect: connection refused"} 2025-12-18T11:07:34.838Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "3c6e08b7-2680-4af4-b592-c89ff781242c", "err": "failed to connect to pod monitoring-pxc-0: dial tcp: lookup monitoring-pxc-0.monitoring-pxc.monitoring-2-0-18368 on 34.118.224.10:53: no such host"} 2025-12-18T11:07:35.113Z INFO reconcile replication error {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "96ad9bf0-c435-4418-98e2-490abdb0a6ae", "err": "failed to ensure cluster readonly status: connect to pod monitoring-pxc-0: dial tcp: lookup monitoring-pxc-0.monitoring-pxc.monitoring-2-0-18368 on 34.118.224.10:53: no such host"} 2025-12-18T11:12:31.365Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "a9804797-3a88-40a6-ac2b-6de1bb9cc5d1", "object": "monitoring-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-12-18T11:12:31.463Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": "monitoring-2-0-18368", "name": "monitoring", "reconcileID": "a9804797-3a88-40a6-ac2b-6de1bb9cc5d1", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-12-18T11:12:31.546Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"monitoring","namespace":"monitoring-2-0-18368"}, "namespace": 
"monitoring-2-0-18368", "name": "monitoring", "reconcileID": "a9804797-3a88-40a6-ac2b-6de1bb9cc5d1", "object": "monitoring-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} ... // 22 identical elements ... // 22 identical fields ... // 23 identical elements ... // 25 identical elements ... // 26 identical elements ... // 2 identical entries ... // 2 identical fields ... // 2 identical fields ... // 2 identical fields ... // 2 identical fields ... // 3 identical fields ... // 3 identical fields ... // 3 identical fields ... // 4 identical elements ... // 4 identical fields ... // 4 identical fields ... // 5 identical fields ... // 6 identical fields ... // 7 identical fields ... // 8 identical fields ... // 9 identical elements ... // 9 identical fields ... // 9 identical fields AccessModes: nil, ActiveDeadlineSeconds: nil, Affinity: nil, Affinity: nil, Annotations: map[string]string{ - Annotations: map[string]string{ + Annotations: map[string]string{ + APIVersion: "", - APIVersion: "apps/v1", - APIVersion: "apps/v1", - APIVersion: "v1", Args: {"haproxy"}, Args: {"mysqld"}, Args: nil, AutomountServiceAccountToken: nil, + AvailableReplicas: 0, - AvailableReplicas: 2, - AvailableReplicas: 3, AWSElasticBlockStore: nil, AzureFile: nil, Capacity: nil, - CollisionCount: &0, + CollisionCount: nil, Conditions: nil, ConfigMapKeyRef: nil, ConfigMap: &v1.ConfigMapVolumeSource{ ContainerPort: 30100, ContainerPort: 30101, ContainerPort: 30102, ContainerPort: 30103, ContainerPort: 30104, ContainerPort: 30105, ContainerPort: 3306, ContainerPort: 33060, ContainerPort: 33062, ContainerPort: 3307, ContainerPort: 3309, ContainerPort: 4444, ContainerPort: 4567, ContainerPort: 4568, ContainerPort: 7777, ContainerPort: 8404, Containers: []v1.Container{ + CreationTimestamp: v1.Time{}, - CreationTimestamp: v1.Time{Time: s"2025-12-18 10:53:52 +0000 UTC"}, - CreationTimestamp: v1.Time{Time: s"2025-12-18 10:53:53 +0000 UTC"}, + CurrentReplicas: 0, - CurrentReplicas: 2, - CurrentReplicas: 3, + CurrentRevision: "", - CurrentRevision: "monitoring-haproxy-6c887849fd", - CurrentRevision: "monitoring-haproxy-77457689cb", - CurrentRevision: "monitoring-haproxy-86cc4bf678", - CurrentRevision: "monitoring-haproxy-fcdccd58d", - CurrentRevision: "monitoring-pxc-5f7ffbf476", - CurrentRevision: "monitoring-pxc-695f496d6d", - CurrentRevision: "monitoring-pxc-6cb86df5fd", - CurrentRevision: "monitoring-pxc-7b88d9cd54", DataSource: nil, DataSourceRef: nil, - DefaultMode: &420, - DefaultMode: &420, + DefaultMode: nil, + DefaultMode: nil, DeletionGracePeriodSeconds: nil, DeletionGracePeriodSeconds: nil, DeletionTimestamp: nil, + DeprecatedServiceAccount: "", - DeprecatedServiceAccount: "default", + DNSPolicy: "", - DNSPolicy: "ClusterFirst", EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "my-env-var-secrets"}, Optional: &true}}}, Env: {{Name: "PXC_SERVICE", Value: "monitoring-pxc"}, {Name: "LIVENESS_CHECK_TIMEOUT", Value: "5"}, {Name: "READINESS_CHECK_TIMEOUT", Value: "1"}}, Env: {{Name: "PXC_SERVICE", Value: "monitoring-pxc-unready"}, {Name: "MONITOR_HOST", Value: "%"}, {Name: "MYSQL_ROOT_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-monitoring"}, Key: "root"}}}, {Name: "XTRABACKUP_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-monitoring"}, Key: "xtrabackup"}}}, ...}, Env: []v1.EnvVar{ EphemeralContainers: nil, Exec: nil, FailureThreshold: 3, FC: nil, FieldPath: "metadata.name", FieldPath: 
"metadata.namespace", FieldRef: nil, FieldRef: &v1.ObjectFieldSelector{ - FieldsType: "FieldsV1", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:metadata":{"f:annotations":{".":{},"f:percona.com/last-confi`..., - FieldsV1: s`{"f:status":{"f:availableReplicas":{},"f:collisionCount":{},"f:c`..., FileKeyRef: nil, Finalizers: nil, Finalizers: nil, + Generation: 0, - Generation: 1, - Generation: 2, - Generation: 3, - Generation: 4, GitRepo: nil, GRPC: nil, Host: "", HostAliases: nil, HostAliases: nil, HostIP: "", HostIPC: false, Hostname: "", HostPort: 0, HTTPGet: &v1.HTTPGetAction{ HTTPHeaders: nil, ImagePullPolicy: "Always", ImagePullSecrets: nil, InitContainers: []v1.Container{ InitialDelaySeconds: 15, InitialDelaySeconds: 300, InitialDelaySeconds: 300, ISCSI: nil, Items: nil, Items: nil, - Key: "pmmserver", + Key: "pmmserverkey", "kubectl.kubernetes.io/default-container": "haproxy", "kubectl.kubernetes.io/default-container": "pxc", Labels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Labels: nil, + "last-applied-secret": "90f1daf8fbfcd28aec8f1038d82b4740bfb06d8d87a1382aba0e68ab5303cc0d", Lifecycle: nil, Lifecycle: &{PreStop: &{Exec: &{Command: {"bash", "-c", "pmm-admin unregister --force"}}}}, LivenessProbe: &v1.Probe{ LocalObjectReference: {Name: "auto-monitoring-pxc"}, LocalObjectReference: {Name: "internal-monitoring"}, LocalObjectReference: {Name: "monitoring-haproxy"}, LocalObjectReference: {Name: "monitoring-pxc"}, ManagedFields: nil, + ManagedFields: nil, - ManagedFields: []v1.ManagedFieldsEntry{ - Manager: "kube-controller-manager", - Manager: "percona-xtradb-cluster-operator", MinReadySeconds: 0, [mysql] 2025/12/18 11:05:08 packets.go:58 unexpected EOF [mysql] 2025/12/18 11:07:55 packets.go:58 unexpected EOF Name: "", Name: "auto-config", {Name: "bin", VolumeSource: {EmptyDir: &{}}}, {Name: "CLIENT_PORT_LISTEN", Value: "7777"}, {Name: "CLIENT_PORT_MAX", Value: "30105"}, {Name: "CLIENT_PORT_MIN", Value: "30100"}, Name: "config", {Name: "DB_TYPE", Value: "haproxy"}, {Name: "DB_TYPE", Value: "mysql"}, {Name: "DB_USER", Value: "monitor"}, {Name: "haproxy-auto", VolumeSource: {EmptyDir: &{}}}, Name: "haproxy-custom", Name: "ist", {Name: "MONITOR_USER", Value: "monitor"}, Name: "my-env-var-secrets", Name: "mysql", Name: "mysql-admin", Name: "mysql-init-file", Name: "mysql-replicas", Name: "mysql-users-secret-file", Name: "mysqlx", {Name: "PMM_AGENT_LISTEN_PORT", Value: "7777"}, {Name: "PMM_AGENT_PORTS_MIN", Value: "30100"}, {Name: "PMM_AGENT_SERVER_ADDRESS", Value: "monitoring-service"}, Name: "PMM_AGENT_SERVER_PASSWORD", Name: "PMM_AGENT_SERVER_USERNAME", {Name: "PMM_AGENT_SERVER_USERNAME", Value: "api_key"}, {Name: "PMM_AGENT_SETUP_FORCE", Value: "1"}, Name: "PMM_AGENT_SETUP_NODE_NAME", {Name: "PMM_AGENT_SETUP_NODE_TYPE", Value: "container"}, Name: "PMM_PASSWORD", {Name: "PMM_SERVER", Value: "monitoring-service"}, Name: "PMM_USER", Name: "POD_NAME", Name: "POD_NAMESPASE", Name: "proxy-protocol", Namespace: "monitoring-2-0-18368", Name: "ssl", Name: "ssl-internal", Name: "sst", Name: "stats", {Name: "tmp", VolumeSource: {EmptyDir: &{}}}, Name: "vault-keyring-secret", Name: "write-set", NFS: nil, NodeName: "", 
NodeSelector: nil, ObjectMeta: {Labels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: {"kubectl.kubernetes.io/default-container": "haproxy", "last-applied-secret": "90f1daf8fbfcd28aec8f1038d82b4740bfb06d8d87a1382aba0e68ab5303cc0d", "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e", "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0"}}, ObjectMeta: {Labels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: {"kubectl.kubernetes.io/default-container": "haproxy", "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e"}}, ObjectMeta: {Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: {"kubectl.kubernetes.io/default-container": "pxc", "last-applied-secret": "90f1daf8fbfcd28aec8f1038d82b4740bfb06d8d87a1382aba0e68ab5303cc0d", "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e", "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0", ...}}, ObjectMeta: {Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: {"kubectl.kubernetes.io/default-container": "pxc", "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e", "percona.com/ssl-hash": "a9cb8c0eef5a13c6cf54bc54c3377aab", "percona.com/ssl-internal-hash": "ad8317b7d7ff458cfe8ef06125b87cfd"}}, ObjectMeta: {Name: "datadir", Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, ObjectMeta: v1.ObjectMeta{ ObjectMeta: v1.ObjectMeta{ + ObservedGeneration: 0, - ObservedGeneration: 1, - ObservedGeneration: 2, - ObservedGeneration: 3, - ObservedGeneration: 4, - Operation: "Update", - Operation: "Update", Optional: &false, Optional: nil, Optional: &true, Optional: &true, Ordinals: nil, OS: nil, Overhead: nil, OwnerReferences: {{APIVersion: "pxc.percona.com/v1", Kind: "PerconaXtraDBCluster", Name: "monitoring", UID: "998ab165-02d9-4535-bcbc-b82369bd8a2a", ...}}, OwnerReferences: nil, Path: "/local/Status", "percona.com/configuration-hash": "d41d8cd98f00b204e9800998ecf8427e", "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0", + "percona.com/env-secret-config-hash": "5bc403a8d44324c1bc704f02e74876d0", + "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6MCwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBw"..., + "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6MCwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3Vi"..., - 
"percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBw"..., + "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsImxhc3QtYXBwbGllZC1zZWNyZXQiOiI5MGYxZGFmOGZiZmNkMjhhZWM4ZjEwMzhkODJiNDc0MGJmYjA2ZDhkODdhMTM4MmFiYTBlNjhhYjUzMDNjYzBkIiwicGVyY29uYS5jb20vY29uZmlndXJhdGlvbi1oYXNoIjoi"..., - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFi"..., + "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFi"..., - "percona.com/last-config-hash": 
"eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIn19LCJzcGVjIjp7InZvbHVtZXMiOlt7Im5hbWUiOiJoYXByb3h5LWN1c3RvbSIsImNvbmZpZ01h"..., - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIn19LCJzcGVjIjp7InZvbHVtZXMiOlt7Im5hbWUiOiJoYXByb3h5LWN1c3RvbSIsImNvbmZpZ01hcCI6eyJuYW1lIjoibW9uaXRvcmluZy1oYXByb3h5Iiwib3B0aW9uYWwiOnRydWV9fSx7Im5hbWUiOiJoYXByb3h5LWF1dG8iLCJlbXB0eURpciI6e319LHsibmFtZSI6Im15c3FsLXVzZXJzLXNlY3JldC1maWxlIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJpbnRlcm5hbC1tb25pdG9yaW5nIiwib3B0aW9uYWwiOmZhbHNlfX0seyJuYW1lIjoibXktZW52LXZhci1zZWNyZXRzIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJteS1lbnYtdmFyLXNlY3JldHMiLCJvcHRpb25hbCI6dHJ1ZX19LHsibmFtZSI6ImJpbiIsImVtcHR5RGlyIjp7fX1dLCJpbml0Q29udGFpbmVycyI6W3sibmFtZSI6InB4Yy1pbml0IiwiaW1hZ2UiOiJwZXJjb25hbGFiL3BlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3I6UFItMjMyMy02NGYwODYwZiIsImNvbW1hbmQiOlsiL3B4Yy1pbml0LWVudHJ5cG9pbnQuc2giXSwicmVzb3VyY2VzIjp7ImxpbWl0cyI6eyJjcHUiOiI1MG0iLCJtZW1vcnkiOiI1ME0ifX0sInZvbHVtZU1vdW50cyI6W3sibmFtZSI6ImJpbiIsIm1vdW50UGF0aCI6Ii92YXIvbGliL215c3FsIn1dLCJpbWFnZVB1bGxQb2xpY3kiOiJBbHdheXMifSx7Im5hbWUiOiJoYXByb3h5LWluaXQiLCJpbWFnZSI6InBlcmNvbmFsYWIvcGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvcjpQUi0yMzIzLTY0ZjA4NjBmIiwiY29tbWFuZCI6WyIvaGFwcm94eS1pbml0LWVudHJ5cG9pbnQuc2giXSwicmVzb3VyY2VzIjp7ImxpbWl0cyI6eyJjcHUiOiI1MG0iLCJtZW1vcnkiOiI1ME0ifX0sInZvbHVtZU1vdW50cyI6W3sibmFtZSI6ImJpbiIsIm1vdW50UGF0aCI6Ii9vcHQvcGVyY29uYSJ9XSwiaW1hZ2VQdWxsUG9saWN5IjoiQWx3YXlzIn1dLCJjb250YWluZXJzIjpbeyJuYW1lIjoicG1tLWNsaWVudCIsImltYWdlIjoicGVyY29uYWxhYi9wbW0tY2xpZW50OmRldi1sYXRlc3QiLCJwb3J0cyI6W3siY29udGFpbmVyUG9ydCI6Nzc3N30seyJjb250YWluZXJQb3J0IjozMDEwMH0seyJjb250YWluZXJQb3J0IjozMDEwMX0seyJjb250YWluZXJQb3J0IjozMDEwMn0seyJjb250YWluZXJQb3J0IjozMDEwM30seyJjb250YWluZXJQb3J0IjozMDEwNH0se
yJjb250YWluZXJQb3J0IjozMDEwNX1dLCJlbnZGcm9tIjpbeyJzZWNyZXRSZWYiOnsibmFtZSI6Im15LWVudi12YXItc2VjcmV0cyIsIm9wdGlvbmFsIjp0cnVlfX1dLCJlbnYiOlt7Im5hbWUiOiJQTU1fU0VSVkVSIiwidmFsdWUiOiJtb25pdG9yaW5nLXNlcnZpY2UifSx7Im5hbWUiOiJQTU1fVVNFUiIsInZhbHVlIjoiYWRtaW4ifSx7Im5hbWUiOiJQTU1fUEFTU1dPUkQiLCJ2YWx1ZUZyb20iOnsic2VjcmV0S2V5UmVmIjp7Im5hbWUiOiJpbnRlcm5hbC1tb25pdG9yaW5nIiwia2V5IjoicG1tc2VydmVyIn19fSx7Im5hbWUiOiJDTElFTlRfUE9SVF9MSVNURU4iLCJ2YWx1ZSI6Ijc3NzcifSx7Im5hbWUiOiJDTElFTlRfUE9SVF9NSU4iLCJ2YWx1ZSI6IjMwMTAwIn0seyJuYW1lIjoiQ0xJRU5UX1BPUlRfTUFYIiwidmFsdWUiOiIzMDEwNSJ9LHsibmFtZSI6IlBPRF9OQU1FIiwidmFsdWVGcm9tIjp7ImZpZWxkUmVmIjp7ImZpZWxkUGF0aCI6Im1ldGFkYXRhLm5hbWUifX19LHsibmFtZSI6IlBPRF9OQU1FU1BBU0UiLCJ2YWx1ZUZyb20iOnsiZmllbGRSZWYiOnsiZmllbGRQYXRoIjoibWV0YWRhdGEubmFtZXNwYWNlIn19fSx7Im5hbWUiOiJQTU1fQUdFTlRfU0VSVkVSX0FERFJFU1MiLCJ2YWx1ZSI6Im1vbml0b3Jpbmctc2VydmljZSJ9LHsibmFtZSI6IlBNTV9BR0VOVF9TRVJWRVJfVVNFUk5BTUUiLCJ2YWx1ZSI6ImFkbWluIn0seyJuYW1lIjoiUE1NX0FHRU5UX1NFUlZFUl9QQVNTV09SRCIsInZhbHVlRnJvbSI6eyJzZWNyZXRLZXlSZWYiOnsibmFtZSI6ImludGVybmFsLW1vbml0b3JpbmciLCJrZXkiOiJwbW1zZXJ2ZXIifX19LHsibmFtZSI6IlBNTV9BR0VOVF9MSVNURU5fUE9SVCIsInZhbHVlIjoiNzc3NyJ9LHsibmFtZSI6IlBNTV9BR0VOVF9QT1JUU19NSU4iLCJ2YWx1ZSI6IjMwMTAwIn0seyJuYW1lIjoiUE1NX0FHRU5UX1BPUlRTX01BWCIsInZhbHVlIjoiMzAxMDUifSx7Im5hbWUiOiJQTU1fQUdFTlRfQ09ORklHX0ZJTEUiLCJ2YWx1ZSI6Ii91c3IvbG9jYWwvcGVyY29uYS9wbW0yL2NvbmZpZy9wbW0tYWdlbnQueWFtbCJ9LHsibmFtZSI6IlBNTV9BR0VOVF9TRVJWRVJfSU5TRUNVUkVfVExTIiwidmFsdWUiOiIxIn0seyJuYW1lIjoiUE1NX0FHRU5UX0xJU1RFTl9BRERSRVNTIiwidmFsdWUiOiIwLjAuMC4wIn0seyJuYW1lIjoiUE1NX0FHRU5UX1NFVFVQX01FVFJJQ1NfTU9ERSIsInZhbHVlIjoicHVz"..., + "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Miwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoiaGFwcm94eSIsImFwcC5rdWJlcm5ldGVzLmlvL2luc3RhbmNlIjoibW9uaXRvcmluZyIsImFwcC5rdWJlcm5ldGVzLmlvL21hbmFnZWQtYnkiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyLW9wZXJhdG9yIiwiYXBwLmt1YmVybmV0ZXMuaW8vbmFtZSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIiLCJhcHAua3ViZXJuZXRlcy5pby9wYXJ0LW9mIjoicGVyY29uYS14dHJhZGItY2x1c3RlciJ9fSwidGVtcGxhdGUiOnsibWV0YWRhdGEiOnsibGFiZWxzIjp7ImFwcC5rdWJlcm5ldGVzLmlvL2NvbXBvbmVudCI6ImhhcHJveHkiLCJhcHAua3ViZXJuZXRlcy5pby9pbnN0YW5jZSI6Im1vbml0b3JpbmciLCJhcHAua3ViZXJuZXRlcy5pby9tYW5hZ2VkLWJ5IjoicGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvciIsImFwcC5rdWJlcm5ldGVzLmlvL25hbWUiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIiwiYXBwLmt1YmVybmV0ZXMuaW8vcGFydC1vZiI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXIifSwiYW5ub3RhdGlvbnMiOnsia3ViZWN0bC5rdWJlcm5ldGVzLmlvL2RlZmF1bHQtY29udGFpbmVyIjoiaGFwcm94eSIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIn19LCJzcGVjIjp7InZvbHVtZXMiOlt7Im5hbWUiOiJoYXByb3h5LWN1c3RvbSIsImNvbmZpZ01hcCI6eyJuYW1lIjoibW9uaXRvcmluZy1oYXByb3h5Iiwib3B0aW9uYWwiOnRydWV9fSx7Im5hbWUiOiJoYXByb3h5LWF1dG8iLCJlbXB0eURpciI6e319LHsibmFtZSI6Im15c3FsLXVzZXJzLXNlY3JldC1maWxlIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJpbnRlcm5hbC1tb25pdG9yaW5nIiwib3B0aW9uYWwiOmZhbHNlfX0seyJuYW1lIjoibXktZW52LXZhci1zZWNyZXRzIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJteS1lbnYtdmFyLXNlY3JldHMiLCJvcHRpb25hbCI6dHJ1ZX19LHsibmFtZSI6ImJpbiIsImVtcHR5RGlyIjp7fX1dLCJpbml0Q29udGFpbmVycyI6W3sibmFtZSI6InB4Yy1pbml0IiwiaW1hZ2UiOiJwZXJjb25hbGFiL3BlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3I6UFItMjMyMy02NGYwODYwZiIsImNvbW1hbmQiOlsiL3B4Yy1pbml0LWVudHJ5cG9pbnQuc2giXSwicmVzb3VyY2VzIjp7ImxpbWl0cyI6eyJjcHUiOiI1MG0iLCJtZW1vcnkiOiI1ME0ifX0sInZvbHVtZU1vdW50cyI6W3sibmFtZSI6ImJpbiIsIm1vdW50UGF0aCI6Ii92YXIvbGliL215c3FsIn1dLCJpbWFnZVB1bGxQb2xpY3kiOiJBbHdheXMifSx7Im5hbWUiOiJoYXByb3h5LWluaXQiLCJpbWFnZSI6InBlcmNvbmFsYWIvcGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGV
yYXRvcjpQUi0yMzIzLTY0ZjA4NjBmIiwiY29tbWFuZCI6WyIvaGFwcm94eS1pbml0LWVudHJ5cG9pbnQuc2giXSwicmVzb3VyY2VzIjp7ImxpbWl0cyI6eyJjcHUiOiI1MG0iLCJtZW1vcnkiOiI1ME0ifX0sInZvbHVtZU1vdW50cyI6W3sibmFtZSI6ImJpbiIsIm1vdW50UGF0aCI6Ii9vcHQvcGVyY29uYSJ9XSwiaW1hZ2VQdWxsUG9saWN5IjoiQWx3YXlzIn1dLCJjb250YWluZXJzIjpbeyJuYW1lIjoicG1tLWNsaWVudCIsImltYWdlIjoicGVyY29uYWxhYi9wbW0tY2xpZW50OmRldi1sYXRlc3QiLCJwb3J0cyI6W3siY29udGFpbmVyUG9ydCI6Nzc3N30seyJjb250YWluZXJQb3J0IjozMDEwMH0seyJjb250YWluZXJQb3J0IjozMDEwMX0seyJjb250YWluZXJQb3J0IjozMDEwMn0seyJjb250YWluZXJQb3J0IjozMDEwM30seyJjb250YWluZXJQb3J0IjozMDEwNH0seyJjb250YWluZXJQb3J0IjozMDEwNX1dLCJlbnZGcm9tIjpbeyJzZWNyZXRSZWYiOnsibmFtZSI6Im15LWVudi12YXItc2VjcmV0cyIsIm9wdGlvbmFsIjp0cnVlfX1dLCJlbnYiOlt7Im5hbWUiOiJQTU1fU0VSVkVSIiwidmFsdWUiOiJtb25pdG9yaW5nLXNlcnZpY2UifSx7Im5hbWUiOiJQTU1fVVNFUiIsInZhbHVlIjoiYXBpX2tleSJ9LHsibmFtZSI6IlBNTV9QQVNTV09SRCIsInZhbHVlRnJvbSI6eyJzZWNyZXRLZXlSZWYiOnsibmFtZSI6ImludGVybmFsLW1vbml0b3JpbmciLCJrZXkiOiJwbW1zZXJ2ZXJrZXkifX19LHsibmFtZSI6IkNMSUVOVF9QT1JUX0xJU1RFTiIsInZhbHVlIjoiNzc3NyJ9LHsibmFtZSI6IkNMSUVOVF9QT1JUX01JTiIsInZhbHVlIjoiMzAxMDAifSx7Im5hbWUiOiJDTElFTlRfUE9SVF9NQVgiLCJ2YWx1ZSI6IjMwMTA1In0seyJuYW1lIjoiUE9EX05BTUUiLCJ2YWx1ZUZyb20iOnsiZmllbGRSZWYiOnsiZmllbGRQYXRoIjoibWV0YWRhdGEubmFtZSJ9fX0seyJuYW1lIjoiUE9EX05BTUVTUEFTRSIsInZhbHVlRnJvbSI6eyJmaWVsZFJlZiI6eyJmaWVsZFBhdGgiOiJtZXRhZGF0YS5uYW1lc3BhY2UifX19LHsibmFtZSI6IlBNTV9BR0VOVF9TRVJWRVJfQUREUkVTUyIsInZhbHVlIjoibW9uaXRvcmluZy1zZXJ2aWNlIn0seyJuYW1lIjoiUE1NX0FHRU5UX1NFUlZFUl9VU0VSTkFNRSIsInZhbHVlIjoiYXBpX2tleSJ9LHsibmFtZSI6IlBNTV9BR0VOVF9TRVJWRVJfUEFTU1dPUkQiLCJ2YWx1ZUZyb20iOnsic2VjcmV0S2V5UmVmIjp7Im5hbWUiOiJpbnRlcm5hbC1tb25pdG9yaW5nIiwia2V5IjoicG1tc2VydmVya2V5In19fSx7Im5hbWUiOiJQTU1fQUdFTlRfTElTVEVOX1BPUlQiLCJ2YWx1ZSI6Ijc3NzcifSx7Im5hbWUiOiJQTU1fQUdFTlRfUE9SVFNfTUlOIiwidmFsdWUiOiIzMDEwMCJ9LHsibmFtZSI6IlBNTV9BR0VOVF9QT1JUU19NQVgiLCJ2YWx1ZSI6IjMwMTA1In0seyJuYW1lIjoiUE1NX0FHRU5UX0NPTkZJR19GSUxFIiwidmFsdWUiOiIvdXNyL2xvY2FsL3BlcmNvbmEvcG1tMi9jb25maWcvcG1tLWFnZW50LnlhbWwifSx7Im5hbWUiOiJQTU1fQUdFTlRfU0VSVkVSX0lOU0VDVVJFX1RMUyIsInZhbHVlIjoiMSJ9LHsibmFtZSI6IlBNTV9BR0VOVF9MSVNURU5fQUREUkVTUyIsInZhbHVlIjoiMC4wLjAuMCJ9LHsibmFtZSI6IlBNTV9BR0VOVF9TRVRVUF9NRVRSSUNTX01PREUiLCJ2"..., - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3Vi"..., + "percona.com/last-config-hash": 
"eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsImxhc3QtYXBwbGllZC1zZWNyZXQiOiI5MGYxZGFmOGZiZmNkMjhhZWM4ZjEwMzhkODJiNDc0MGJmYjA2ZDhkODdhMTM4MmFiYTBlNjhhYjUzMDNjYzBkIiwicGVyY29uYS5jb20vY29uZmlndXJhdGlvbi1oYXNoIjoiZDQxZDhjZDk4ZjAw"..., - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vc3NsLWhhc2giOiJhOWNiOGMwZWVmNWExM2M2Y2Y1NGJjNTRjMzM3N2FhYiIsInBlcmNvbmEu"..., + "percona.com/last-config-hash": 
"eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vc3NsLWhhc2giOiJhOWNiOGMwZWVmNWExM2M2Y2Y1NGJjNTRjMzM3N2FhYiIsInBlcmNvbmEuY29tL3NzbC1pbnRlcm5hbC1oYXNoIjoiYWQ4MzE3YjdkN2ZmNDU4Y2ZlOGVmMDYxMjViODdjZmQifX0sInNwZWMiOnsidm9sdW1lcyI6W3sibmFtZSI6InRtcCIsImVtcHR5RGlyIjp7fX0seyJuYW1lIjoiY29uZmlnIiwiY29uZmlnTWFwIjp7Im5hbWUiOiJtb25pdG9yaW5nLXB4YyIsIm9wdGlvbmFsIjp0cnVlfX0seyJuYW1lIjoic3NsLWludGVybmFsIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJtb25pdG9yaW5nLXNzbC1pbnRlcm5hbCIsIm9wdGlvbmFsIjp0cnVlfX0seyJuYW1lIjoic3NsIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJzb21lLW5hbWUtc3NsIiwib3B0aW9uYWwiOmZhbHNlfX0seyJuYW1lIjoiYXV0by1jb25maWciLCJjb25maWdNYXAiOnsibmFtZSI6ImF1dG8tbW9uaXRvcmluZy1weGMiLCJvcHRpb25hbCI6dHJ1ZX19LHsibmFtZSI6InZhdWx0LWtleXJpbmctc2VjcmV0Iiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJtb25pdG9yaW5nLXZhdWx0Iiwib3B0aW9uYWwiOnRydWV9fSx7Im5hbWUiOiJteXNxbC11c2Vycy1zZWNyZXQtZmlsZSIsInNlY3JldCI6eyJzZWNyZXROYW1lIjoiaW50ZXJuYWwtbW9uaXRvcmluZyIsIm9wdGlvbmFsIjpmYWxzZX19LHsibmFtZSI6Im15c3FsLWluaXQtZmlsZSIsInNlY3JldCI6eyJzZWNyZXROYW1lIjoibW9uaXRvcmluZy1teXNxbC1pbml0Iiwib3B0aW9uYWwiOnRydWV9fV0sImluaXRDb250YWluZXJzIjpbeyJuYW1lIjoicHhjLWluaXQiLCJpbWFnZSI6InBlcmNvbmFsYWIvcGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvcjpQUi0yMzIzLTY0ZjA4NjBmIiwiY29tbWFuZCI6WyIvcHhjLWluaXQtZW50cnlwb2ludC5zaCJdLCJyZXNvdXJjZXMiOnsibGltaXRzIjp7ImNwdSI6IjUwbSIsIm1lbW9yeSI6IjUwTSJ9fSwidm9sdW1lTW91bnRzIjpbeyJuYW1lIjoiZGF0YWRpciIsIm1vdW50UGF0aCI6Ii92YXIvbGliL215c3FsIn1dLCJpbWFnZVB1bGxQb2xpY3kiOiJBbHdheXMifV0sImNvbnRhaW5lcnMiOlt7Im5hbWUiOiJwbW0tY2xpZW50IiwiaW1hZ2UiOiJwZXJjb25hbGFiL3BtbS1jbGllbnQ6ZGV2LWxhdGVzdCIsInBvcnRzIjpbeyJjb250YWluZXJQb3J0Ijo3Nzc3fSx7ImNvbnRhaW5lclBvcnQiOjMwMTAwfSx7ImNvbnRhaW5lclBvcnQiOjMwMTAxfSx7ImNvbnRhaW5lclBvcnQiOjMwMTAyfSx7ImNvbnRhaW5lclBvcnQiOjMwMTAzfSx7ImNvbnRhaW5lclBvcnQiOjMwMTA0fSx7ImNvbnRhaW5lclBvcnQiOjMwMTA1fV0sImVudkZyb20iOlt7InNlY3JldFJlZiI6eyJuYW1lIjoibXktZW52LXZhci1zZWNyZXRzIiwib3B0aW9uYWwiOnRydWV9fV0sImVudiI6W3sibmFtZSI6IlBNTV9TRVJWRVIiLCJ2YWx1ZSI6Im1vbml0b3Jpbmctc2VydmljZSJ9LHsibmFtZSI6IlBNTV9VU0VSIiwidmFsdWUiOiJhcGlfa2V5In0seyJuYW1lIjoiUE1NX1BBU1NXT1JEIiwidmFsdWVGcm9tIjp7InNlY3JldEtleVJlZiI6eyJuYW1lIjoiaW50ZXJuYWwtbW9uaXRvcmluZyIsImtleSI6InBtbXNlcnZlcmtleSJ9fX0seyJuYW1lIjoiQ0xJRU5UX1BPUlRfTElTVEVOIiwidmFsdWUiOiI3Nzc3In0seyJuYW1lIjoiQ0xJRU5UX1BPUlRfTUlOIiwidmFsdWUiOiIzMDEwMCJ9LHsibmFtZSI6IkNMSUVOVF9QT1JUX01BWCIsInZhbHVlIjoiMzAxMDUifSx7Im5hbWUiOiJQT0RfTkFNRSIsInZhbHVlRnJvbSI6eyJmaWVsZFJlZiI6eyJmaWVsZFBhdGgiOiJtZXRhZGF0YS5uYW1lIn19fSx7Im5hbWUiOiJQT0RfTkFNRVNQQVNFIiwidmFsdWVGcm9tIjp7ImZpZWxkUmVmIjp7ImZpZWxkUGF0aCI6Im1ldGFkYXRhLm5hbWVzcGFjZSJ9fX0seyJuYW1lIjoiUE1NX0FHRU5UX1NFUlZFUl9BRERSRVNTIiwidmFsdWUiOiJtb25pdG9yaW5nLXNlcnZpY2UifSx7Im5hbWUiOiJQTU1fQUdFTl
RfU0VSVkVSX1VTRVJOQU1FIiwidmFsdWUiOiJhcGlfa2V5In0seyJuYW1lIjoiUE1NX0FHRU5UX1NFUlZFUl9QQVNTV09SRCIsInZhbHVlRnJvbSI6eyJzZWNyZXRLZXlSZWYiOnsibmFtZSI6ImludGVybmFsLW1vbml0b3JpbmciLCJrZXkiOiJwbW1zZXJ2ZXJrZXkifX19LHsibmFtZSI6IlBNTV9BR0VOVF9MSVNURU5fUE9SVCIsInZhbHVlIjoiNzc3NyJ9LHsibmFtZSI6IlBNTV9BR0VOVF9QT1JUU19NSU4iLCJ2YWx1ZSI6IjMwMTAwIn0seyJuYW1lIjoiUE1NX0FHRU5UX1BPUlRTX01BWCIsInZhbHVlIjoiMzAxMDUifSx7Im5hbWUiOiJQTU1fQUdFTlRfQ09ORklHX0ZJTEUiLCJ2YWx1ZSI6Ii91c3IvbG9jYWwvcGVyY29uYS9wbW0yL2NvbmZpZy9wbW0tYWdlbnQueWFtbCJ9LHsibmFtZSI6IlBNTV9BR0VOVF9T"..., - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vc3NsLWhhc2giOiJhOWNiOGMwZWVmNWExM2M2Y2Y1NGJjNTRjMzM3N2FhYiIsInBlcmNvbmEuY29tL3NzbC1pbnRlcm5hbC1oYXNoIjoiYWQ4MzE3YjdkN2ZmNDU4Y2ZlOGVmMDYxMjViODdjZmQifX0sInNwZWMiOnsidm9sdW1lcyI6W3sibmFtZSI6InRtcCIsImVtcHR5RGlyIjp7fX0seyJuYW1lIjoiY29uZmlnIiwiY29uZmlnTWFwIjp7Im5hbWUiOiJtb25pdG9yaW5nLXB4YyIsIm9wdGlvbmFsIjp0cnVlfX0seyJuYW1lIjoic3NsLWludGVybmFsIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJtb25pdG9yaW5nLXNzbC1pbnRlcm5hbCIsIm9wdGlvbmFsIjp0cnVlfX0seyJuYW1lIjoic3NsIiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJzb21lLW5hbWUtc3NsIiwib3B0aW9uYWwiOmZhbHNlfX0seyJuYW1lIjoiYXV0by1jb25maWciLCJjb25maWdNYXAiOnsibmFtZSI6ImF1dG8tbW9uaXRvcmluZy1weGMiLCJvcHRpb25hbCI6dHJ1ZX19LHsibmFtZSI6InZhdWx0LWtleXJpbmctc2VjcmV0Iiwic2VjcmV0Ijp7InNlY3JldE5hbWUiOiJtb25pdG9yaW5nLXZhdWx0Iiwib3B0aW9uYWwiOnRydWV9fSx7Im5hbWUiOiJteXNxbC11c2Vycy1zZWNyZXQtZmlsZSIsInNlY3JldCI6eyJzZWNyZXROYW1lIjoiaW50ZXJuYWwtbW9uaXRvcmluZyIsIm9wdGlvbmFsIjpmYWxzZX19LHsibmFtZSI6Im15c3FsLWluaXQtZmlsZSIsInNlY3JldCI6eyJzZWNyZXROYW1lIjoibW9uaXRvcmluZy1teXNxbC1pbml0Iiwib3B0aW9uYWwiOnRydWV9fV0sImluaXRDb250YWluZXJzIjpbeyJuYW1lIjoicHhjLWluaXQiLCJpbWFnZSI6InBlcmNvbmFsYWIvcGVyY29uYS14dHJhZGItY2x1c3Rlci1vcGVyYXRvcjpQUi0yMzIzLTY0ZjA4NjBmIiwiY29tbWFuZCI6WyIvcHhjLWluaXQtZW50cnlwb2ludC5zaCJdLCJyZXNvdXJjZXMiOnsibGltaXRzIjp7ImNwdSI6IjUwbSIsIm1lbW9yeSI6IjUwTSJ9fSwidm9sdW1lTW91bnRzIjpbeyJuYW1lIjoiZGF0YWRpciIsIm1vdW50UGF0aCI6Ii92YXIvbGliL215c3FsIn1dLCJpbWFnZVB1bGxQb2xpY3kiOiJBbHdheXMifV0sImNvbnRhaW5lcnMiOlt7Im5hbWUiOiJwbW0tY2xpZW50IiwiaW1hZ2UiOiJwZXJjb25hbGFiL3BtbS1jbGllbnQ6ZGV2LWxhdGVzdCIsInBvcnRzIjpbeyJjb250YWluZXJQb3J0Ijo3Nzc3fSx7ImNvbnRhaW5lclBvcnQiOjMwMTAwfSx7ImNvbnRhaW5lclBvcnQiOjMwMTAxfSx7ImNvbnRhaW5lclBvcnQiOjMwMTAyfSx7ImNvbnRhaW5lclBvcnQiOjMwMTAzfSx7ImNvbnRhaW5lclBvcnQiOjMwMTA0fSx7ImNvbnRhaW5lclBvcnQiOjMwMTA1fV0sImVudkZyb20iOlt7InNlY3JldFJlZiI6eyJuYW1lIjoibXktZW52LXZhci1zZWNyZXRzIiwib3B0aW9uYWwiOnRydWV9fV0sImVudiI6W3sibmFtZSI6IlBNTV9TRVJWRVIiLCJ2YWx1ZSI6Im1vbml0b3Jpbmctc2VydmljZSJ9LHsibmFtZSI6IlBNTV9VU0VSIiwidmFsdWUiOiJhZG1pbiJ9LHsibmFtZSI6IlBNTV9QQVNTV09SRCIsInZhbHVlRnJvbSI6eyJzZWNyZXRL
ZXlSZWYiOnsibmFtZSI6ImludGVybmFsLW1vbml0b3JpbmciLCJrZXkiOiJwbW1zZXJ2ZXIifX19LHsibmFtZSI6IkNMSUVOVF9QT1JUX0xJU1RFTiIsInZhbHVlIjoiNzc3NyJ9LHsibmFtZSI6IkNMSUVOVF9QT1JUX01JTiIsInZhbHVlIjoiMzAxMDAifSx7Im5hbWUiOiJDTElFTlRfUE9SVF9NQVgiLCJ2YWx1ZSI6IjMwMTA1In0seyJuYW1lIjoiUE9EX05BTUUiLCJ2YWx1ZUZyb20iOnsiZmllbGRSZWYiOnsiZmllbGRQYXRoIjoibWV0YWRhdGEubmFtZSJ9fX0seyJuYW1lIjoiUE9EX05BTUVTUEFTRSIsInZhbHVlRnJvbSI6eyJmaWVsZFJlZiI6eyJmaWVsZFBhdGgiOiJtZXRhZGF0YS5uYW1lc3BhY2UifX19LHsibmFtZSI6IlBNTV9BR0VOVF9TRVJWRVJfQUREUkVTUyIsInZhbHVlIjoibW9uaXRvcmluZy1zZXJ2aWNlIn0seyJuYW1lIjoiUE1NX0FHRU5UX1NFUlZFUl9VU0VSTkFNRSIsInZhbHVlIjoiYWRtaW4ifSx7Im5hbWUiOiJQTU1fQUdFTlRfU0VSVkVSX1BBU1NXT1JEIiwidmFsdWVGcm9tIjp7InNlY3JldEtleVJlZiI6eyJuYW1lIjoiaW50ZXJuYWwtbW9uaXRvcmluZyIsImtleSI6InBtbXNlcnZlciJ9fX0seyJuYW1lIjoiUE1NX0FHRU5UX0xJU1RFTl9QT1JUIiwidmFsdWUiOiI3Nzc3In0seyJuYW1lIjoiUE1NX0FHRU5UX1BPUlRTX01JTiIsInZhbHVlIjoiMzAxMDAifSx7Im5hbWUiOiJQTU1fQUdFTlRfUE9SVFNfTUFYIiwidmFsdWUiOiIzMDEwNSJ9LHsibmFtZSI6IlBNTV9BR0VOVF9DT05GSUdfRklMRSIsInZhbHVlIjoiL3Vzci9sb2NhbC9wZXJjb25hL3BtbTIvY29uZmlnL3BtbS1hZ2VudC55YW1sIn0seyJuYW1lIjoiUE1NX0FHRU5UX1NFUlZFUl9JTlNF"..., - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFiYzcwNGYwMmU3NDg3"..., + "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6Mywic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJtb25pdG9yaW5nIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImQ0MWQ4Y2Q5OGYwMGIyMDRlOTgwMDk5OGVjZjg0MjdlIiwicGVyY29uYS5jb20vZW52LXNlY3JldC1jb25maWctaGFzaCI6IjViYzQwM2E4ZDQ0MzI0YzFiYzcwNGYwMmU3NDg3"..., "percona.com/ssl-hash": "a9cb8c0eef5a13c6cf54bc54c3377aab", "percona.com/ssl-internal-hash": "ad8317b7d7ff458cfe8ef06125b87cfd", + PeriodSeconds: 0, - PeriodSeconds: 10, + PersistentVolumeClaimRetentionPolicy: nil, - PersistentVolumeClaimRetentionPolicy: 
s"&StatefulSetPersistentVolumeClaimRetentionPolicy{WhenDeleted:Retain,WhenScaled:Retain,}", + Phase: "", - Phase: "Pending", + PodManagementPolicy: "", - PodManagementPolicy: "OrderedReady", Port: {IntVal: 7777}, Ports: []v1.ContainerPort{ PreemptionPolicy: nil, ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/liveness-check.sh"}}}, ProbeHandler: v1.ProbeHandler{ + Protocol: "", - Protocol: "TCP", Quobyte: nil, ReadinessProbe: &{ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/readiness-check.sh"}}}, InitialDelaySeconds: 15, TimeoutSeconds: 15, PeriodSeconds: 30, ...}, ReadinessProbe: &v1.Probe{ + ReadyReplicas: 0, - ReadyReplicas: 2, - ReadyReplicas: 3, + Replicas: 0, + Replicas: &0, Replicas: &2, - Replicas: 2, - Replicas: &2, Replicas: &3, - Replicas: 3, - Replicas: &3, ResizePolicy: nil, ResourceFieldRef: nil, Resources: {Limits: {s"cpu": {i: {...}, Format: "DecimalSI"}, s"memory": {i: {...}, Format: "DecimalSI"}}, Requests: {s"cpu": {i: {...}, s: "308m", Format: "DecimalSI"}, s"memory": {i: {...}, s: "508M", Format: "DecimalSI"}}}, + ResourceVersion: "", - ResourceVersion: "1766055335207791019", - ResourceVersion: "1766055455651999012", - ResourceVersion: "1766055695933919019", - ResourceVersion: "1766055780045871012", - ResourceVersion: "1766055846784655019", - ResourceVersion: "1766055951171871012", - ResourceVersion: "1766056005370591019", - ResourceVersion: "1766056094728367012", + RestartPolicy: "", - RestartPolicy: "Always", - RevisionHistoryLimit: &10, + RevisionHistoryLimit: nil, + SchedulerName: "", + SchedulerName: "", - SchedulerName: "default-scheduler", - SchedulerName: "default-scheduler", + Scheme: "", - Scheme: "HTTP", SecretKeyRef: &v1.SecretKeySelector{ SecretName: "internal-monitoring", SecretName: "monitoring-mysql-init", SecretName: "monitoring-ssl-internal", SecretName: "monitoring-vault", SecretName: "my-env-var-secrets", SecretName: "some-name-ssl", Secret: &v1.SecretVolumeSource{ SecurityContext: nil, + SecurityContext: nil, - SecurityContext: s"&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,FSGroupChangePolicy:nil,SeccompProfile:nil,AppArmorProfile:nil,SupplementalGroupsPolicy:nil,SELinux"..., Selector: &{MatchLabels: {"app.kubernetes.io/component": "haproxy", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, Selector: &{MatchLabels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "monitoring", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, SelfLink: "", ServiceAccountName: "default", ServiceName: "monitoring-haproxy", ServiceName: "monitoring-pxc", SetHostnameAsFQDN: nil, ShareProcessNamespace: nil, Spec: v1.PersistentVolumeClaimSpec{ Spec: v1.PodSpec{ Spec: v1.StatefulSetSpec{ StartupProbe: nil, Status: v1.PersistentVolumeClaimStatus{ Status: v1.StatefulSetStatus{ StorageClassName: nil, Subdomain: "", Subdomain: "", - Subresource: "status", SuccessThreshold: 1, TCPSocket: nil, Template: v1.PodTemplateSpec{ TerminationGracePeriodSeconds: &30, TerminationGracePeriodSeconds: &600, TerminationGracePeriodSeconds: nil, + TerminationMessagePath: "", - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePolicy: "", - TerminationMessagePolicy: "File", TimeoutSeconds: 15, TimeoutSeconds: 5, TimeoutSeconds: 5, - Time: 
s"2025-12-18 10:53:52 +0000 UTC", - Time: s"2025-12-18 10:53:53 +0000 UTC", - Time: s"2025-12-18 10:55:35 +0000 UTC", - Time: s"2025-12-18 10:57:35 +0000 UTC", - Time: s"2025-12-18 11:00:40 +0000 UTC", - Time: s"2025-12-18 11:00:41 +0000 UTC", - Time: s"2025-12-18 11:01:35 +0000 UTC", - Time: s"2025-12-18 11:03:00 +0000 UTC", - Time: s"2025-12-18 11:03:13 +0000 UTC", - Time: s"2025-12-18 11:04:06 +0000 UTC", - Time: s"2025-12-18 11:05:51 +0000 UTC", - Time: s"2025-12-18 11:05:56 +0000 UTC", - Time: s"2025-12-18 11:06:45 +0000 UTC", - Time: s"2025-12-18 11:08:14 +0000 UTC", Tolerations: nil, Tolerations: nil, - TopologySpreadConstraints: nil, + TopologySpreadConstraints: []v1.TopologySpreadConstraint{}, TypeMeta: {}, TypeMeta: {Kind: "StatefulSet", APIVersion: "apps/v1"}, + UID: "", - UID: "8932b9e7-859b-403e-9c36-63dede70b8f4", - UID: "e2540e9a-f636-4a4d-b6e1-7078908faa0b", + UpdatedReplicas: 0, - UpdatedReplicas: 2, - UpdatedReplicas: 3, + UpdateRevision: "", - UpdateRevision: "monitoring-haproxy-6c887849fd", - UpdateRevision: "monitoring-haproxy-77457689cb", - UpdateRevision: "monitoring-haproxy-86cc4bf678", - UpdateRevision: "monitoring-haproxy-fcdccd58d", - UpdateRevision: "monitoring-pxc-5f7ffbf476", - UpdateRevision: "monitoring-pxc-695f496d6d", - UpdateRevision: "monitoring-pxc-6cb86df5fd", - UpdateRevision: "monitoring-pxc-7b88d9cd54", UpdateStrategy: {Type: "RollingUpdate", RollingUpdate: &{Partition: &0}}, &v1.StatefulSet{ Value: "", - Value: "admin", + Value: "api_key", ValueFrom: nil, ValueFrom: &v1.EnvVarSource{ + Value: "$(PMM_PREFIX)$(POD_NAMESPASE)-$(POD_NAME)", - Value: "$(POD_NAMESPASE)-$(POD_NAME)", VolumeAttributesClassName: nil, VolumeClaimTemplates: nil, VolumeClaimTemplates: []v1.PersistentVolumeClaim{ VolumeDevices: nil, - VolumeMode: &"Filesystem", + VolumeMode: nil, VolumeMounts: {{Name: "bin", MountPath: "/var/lib/mysql"}}, VolumeMounts: {{Name: "datadir", MountPath: "/var/lib/mysql"}}, VolumeMounts: {{Name: "datadir", MountPath: "/var/lib/mysql"}, {Name: "config", MountPath: "/etc/percona-xtradb-cluster.conf.d"}, {Name: "tmp", MountPath: "/tmp"}, {Name: "ssl", MountPath: "/etc/mysql/ssl"}, ...}, VolumeName: "", VolumeSource: v1.VolumeSource{ Volumes: []v1.Volume{ VsphereVolume: nil, WorkingDir: "", + grep -v NAMESPACE + kubectl get pxc --all-namespaces -o wide + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n monitoring-2-0-18368 monitoring --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/monitoring patched (no change) + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.rjSjDI3FwC ++ mktemp + local LAST_ERR=/tmp/tmp.Y0Y1y8zCDR + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.rjSjDI3FwC perconaxtradbcluster.pxc.percona.com "monitoring" deleted from monitoring-2-0-18368 namespace + cat /tmp/tmp.Y0Y1y8zCDR + rm /tmp/tmp.rjSjDI3FwC /tmp/tmp.Y0Y1y8zCDR + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.GknkHti71n ++ mktemp + local LAST_ERR=/tmp/tmp.mVErkVSjlQ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.GknkHti71n No resources found + cat /tmp/tmp.mVErkVSjlQ + rm /tmp/tmp.GknkHti71n /tmp/tmp.mVErkVSjlQ + return 0 
+ kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.fPWN0TNE54 ++ mktemp + local LAST_ERR=/tmp/tmp.AhP56K6u9c + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.fPWN0TNE54 No resources found + cat /tmp/tmp.AhP56K6u9c + rm /tmp/tmp.fPWN0TNE54 /tmp/tmp.AhP56K6u9c + return 0 + kubectl_bin delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook ++ mktemp + local LAST_OUT=/tmp/tmp.pG47wPwc3V ++ mktemp + local LAST_ERR=/tmp/tmp.Mg3go7kYrm + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete ValidatingWebhookConfiguration percona-xtradbcluster-webhook + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.pG47wPwc3V validatingwebhookconfiguration.admissionregistration.k8s.io "percona-xtradbcluster-webhook" deleted + cat /tmp/tmp.Mg3go7kYrm + rm /tmp/tmp.pG47wPwc3V /tmp/tmp.Mg3go7kYrm + return 0 + kubectl_bin delete -f https://github.com/jetstack/cert-manager/releases/download/v1.18.2/cert-manager.yaml + : + '[' '!' -z '' ']' + '[' -n pxc-operator ']' + kubectl_bin delete --grace-period=0 --force=true namespace monitoring-2-0-18368 + rm -rf /tmp/tmp.u1cffxxmJX + kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.TJGTmBH0t3 + desc 'test passed' + set +o xtrace ----------------------------------------------------------------------------------- test passed ----------------------------------------------------------------------------------- + local LAST_OUT=/tmp/tmp.l5nBNneFBJ ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.UoxloZRwqL + local exit_status=0 + local LAST_ERR=/tmp/tmp.HgzoyDW5FX + local exit_status=0 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace pxc-operator + for i in '$(seq 0 2)' + set +e + kubectl delete --grace-period=0 --force=true namespace monitoring-2-0-18368
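[editor's note] Most of the noise in this trace comes from the expansion of the kubectl_bin retry wrapper (mktemp LAST_OUT/LAST_ERR, `seq 0 2`, `set +e`/`set -e`, break on success, replay of captured output). A minimal reconstruction inferred from the trace only, assuming a short back-off between attempts; it is not the verbatim helper from e2e-tests/functions:

# Retry wrapper reconstructed from the set -x output above (sketch).
kubectl_bin() {
    local LAST_OUT LAST_ERR exit_status=0
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        [ "$exit_status" != 0 ] || break   # first successful attempt ends the loop
        sleep 1                            # back-off interval assumed; not visible in the trace
    done
    cat "$LAST_OUT"       # replay captured stdout, as the trace shows
    cat "$LAST_ERR"       # then captured stderr
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}

# Usage matching the trace, e.g.:
#   kubectl_bin delete --grace-period=0 --force=true namespace pxc-operator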