Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/logs/one-pod-5-7.log Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 + main + create_infra one-pod-31237 + local ns=one-pod-31237 + '[' -n pxc-operator ']' + kubectl get pxc --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n one-pod-17162 one-pod --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/one-pod patched (no change) + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.Y8F4kZTAnK ++ mktemp + local LAST_ERR=/tmp/tmp.wPkW5wIiDZ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Y8F4kZTAnK perconaxtradbcluster.pxc.percona.com "one-pod" deleted from one-pod-17162 namespace + cat /tmp/tmp.wPkW5wIiDZ + rm /tmp/tmp.Y8F4kZTAnK /tmp/tmp.wPkW5wIiDZ + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.8G5s41c916 ++ mktemp + local LAST_ERR=/tmp/tmp.fx6Jj5E9YJ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.8G5s41c916 perconaxtradbclusterbackup.pxc.percona.com "on-demand-backup-aws-s3" deleted from one-pod-17162 namespace perconaxtradbclusterbackup.pxc.percona.com "on-demand-backup-pvc" deleted from one-pod-17162 namespace + cat /tmp/tmp.fx6Jj5E9YJ + rm /tmp/tmp.8G5s41c916 /tmp/tmp.fx6Jj5E9YJ + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.n6CUj8XH4Y ++ mktemp + local LAST_ERR=/tmp/tmp.UVo3SrtXBm + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.n6CUj8XH4Y No resources found + cat /tmp/tmp.UVo3SrtXBm + rm /tmp/tmp.n6CUj8XH4Y /tmp/tmp.UVo3SrtXBm + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ awk '{print $1}' ++ grep chaos-mesh + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep validate-auth ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete 
clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator ++ mktemp + awk '{print$1}' + local LAST_OUT=/tmp/tmp.qkea8ns47v ++ mktemp + kubectl_bin get ns + local LAST_ERR=/tmp/tmp.W2PeEjVHqU + local exit_status=0 ++ mktemp + local LAST_OUT=/tmp/tmp.pNpNNT7PJQ ++ mktemp + local LAST_ERR=/tmp/tmp.5QiF4vp8lr + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + xargs kubectl delete ns ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.pNpNNT7PJQ + cat /tmp/tmp.5QiF4vp8lr + rm /tmp/tmp.pNpNNT7PJQ /tmp/tmp.5QiF4vp8lr + return 0 namespace "one-pod-17162" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.qkea8ns47v namespace "pxc-operator" deleted + cat /tmp/tmp.W2PeEjVHqU + rm /tmp/tmp.qkea8ns47v /tmp/tmp.W2PeEjVHqU + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.PWZsSMVbGx ++ mktemp + local LAST_ERR=/tmp/tmp.UbdgWc1BjM + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.PWZsSMVbGx namespace/pxc-operator created + cat /tmp/tmp.UbdgWc1BjM + rm /tmp/tmp.PWZsSMVbGx /tmp/tmp.UbdgWc1BjM + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.PZmGMRgKoV +++ mktemp ++ local LAST_ERR=/tmp/tmp.NazQAGtQC2 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.PZmGMRgKoV ++ cat /tmp/tmp.NazQAGtQC2 ++ rm /tmp/tmp.PZmGMRgKoV /tmp/tmp.NazQAGtQC2 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2076-ee1be0ba-8-cluster6 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.K5WZHXO2di ++ mktemp + local LAST_ERR=/tmp/tmp.1PkYQlPoG4 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2076-ee1be0ba-8-cluster6 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 
'!=' 0 ']' + break + cat /tmp/tmp.K5WZHXO2di Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2076-ee1be0ba-8-cluster6" modified. + cat /tmp/tmp.1PkYQlPoG4 + rm /tmp/tmp.K5WZHXO2di /tmp/tmp.1PkYQlPoG4 + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.4ka5diMCFQ ++ mktemp + local LAST_ERR=/tmp/tmp.JXIoGfUH4Y + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4ka5diMCFQ customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.JXIoGfUH4Y + rm /tmp/tmp.4ka5diMCFQ /tmp/tmp.JXIoGfUH4Y + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.skt9lf8bI3 ++ mktemp + local LAST_ERR=/tmp/tmp.4i27i2YWBp + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.skt9lf8bI3 clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.4i27i2YWBp + rm /tmp/tmp.skt9lf8bI3 /tmp/tmp.4i27i2YWBp + return 0 + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2076-ee1be0ba^' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/deploy/cw-operator.yaml + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.5arMYUABJB ++ mktemp + local LAST_ERR=/tmp/tmp.h4uxwBfzhM + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.5arMYUABJB deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.h4uxwBfzhM + rm /tmp/tmp.5arMYUABJB /tmp/tmp.h4uxwBfzhM + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + local LAST_OUT=/tmp/tmp.GXlrk4yLuD ++ mktemp + local 
LAST_ERR=/tmp/tmp.jgzi6j5mYx + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.GXlrk4yLuD pod/percona-xtradb-cluster-operator-84658c6c47-9rq72 condition met + cat /tmp/tmp.jgzi6j5mYx + rm /tmp/tmp.GXlrk4yLuD /tmp/tmp.jgzi6j5mYx + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.E3QaP2Wwz0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.3zSnwwuzu5 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.E3QaP2Wwz0 ++ cat /tmp/tmp.3zSnwwuzu5 ++ rm /tmp/tmp.E3QaP2Wwz0 /tmp/tmp.3zSnwwuzu5 ++ return 0 + wait_pod percona-xtradb-cluster-operator-84658c6c47-9rq72 480 pxc-operator + local pod=percona-xtradb-cluster-operator-84658c6c47-9rq72 + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-84658c6c47-9rq72 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-84658c6c47-9rq72 condition met waiting for pod/percona-xtradb-cluster-operator-84658c6c47-9rq72 to become Ready.Ok + sleep 3 + create_namespace one-pod-31237 + local namespace=one-pod-31237 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ sed s/NAMESPACE// ++ awk '-F ' '{print $2}' + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- 
cleaned up all old namespaces ----------------------------------------------------------------------------------- + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + xargs kubectl delete ns + desc 'cleaned up old namespaces one-pod-31237' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces one-pod-31237 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace one-pod-31237 + kubectl_bin get ns ++ mktemp + local LAST_OUT=/tmp/tmp.kfDqW3SgLg ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.VxW5iXDhZF + local exit_status=0 + local LAST_OUT=/tmp/tmp.r9WL3WvxjS ++ mktemp ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + local LAST_ERR=/tmp/tmp.QwIorcUSfk + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace one-pod-31237 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.kfDqW3SgLg + cat /tmp/tmp.VxW5iXDhZF + rm /tmp/tmp.kfDqW3SgLg /tmp/tmp.VxW5iXDhZF + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + return 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace one-pod-31237 error: resource(s) were provided, but no name was specified + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace one-pod-31237 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.r9WL3WvxjS + cat /tmp/tmp.QwIorcUSfk Error from server (NotFound): namespaces "one-pod-31237" not found + rm /tmp/tmp.r9WL3WvxjS /tmp/tmp.QwIorcUSfk + return 1 + : + wait_for_delete namespace/one-pod-31237 + local res=namespace/one-pod-31237 + echo -n 'waiting for namespace/one-pod-31237 to be deleted' waiting for namespace/one-pod-31237 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "one-pod-31237" not found + desc 'create namespace one-pod-31237' + set +o xtrace ----------------------------------------------------------------------------------- create namespace one-pod-31237 ----------------------------------------------------------------------------------- + kubectl_bin create namespace one-pod-31237 ++ mktemp + local LAST_OUT=/tmp/tmp.h6Wfc0h7tl ++ mktemp + local LAST_ERR=/tmp/tmp.uLRc69vRmr + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace one-pod-31237 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.h6Wfc0h7tl namespace/one-pod-31237 created + cat /tmp/tmp.uLRc69vRmr + rm /tmp/tmp.h6Wfc0h7tl /tmp/tmp.uLRc69vRmr + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.IcL5Pc3oja +++ mktemp ++ local LAST_ERR=/tmp/tmp.sgeOaOUOjX ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.IcL5Pc3oja ++ cat /tmp/tmp.sgeOaOUOjX ++ rm /tmp/tmp.IcL5Pc3oja /tmp/tmp.sgeOaOUOjX ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2076-ee1be0ba-8-cluster6 --namespace=one-pod-31237 ++ mktemp + local LAST_OUT=/tmp/tmp.oQOCHDNiAQ ++ mktemp + local LAST_ERR=/tmp/tmp.Sn6oz1vdQF + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2076-ee1be0ba-8-cluster6 --namespace=one-pod-31237 + exit_status=0 + set -e + '[' 0 
'!=' 0 ']' + break + cat /tmp/tmp.oQOCHDNiAQ Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2076-ee1be0ba-8-cluster6" modified. + cat /tmp/tmp.Sn6oz1vdQF + rm /tmp/tmp.oQOCHDNiAQ /tmp/tmp.Sn6oz1vdQF + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.yPlLR8pHIM ++ mktemp + local LAST_ERR=/tmp/tmp.yjAtPRZ3Gj + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.yPlLR8pHIM secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.yjAtPRZ3Gj + rm /tmp/tmp.yPlLR8pHIM /tmp/tmp.yjAtPRZ3Gj + return 0 + cluster=one-pod + spinup_pxc one-pod /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/one-pod.yml 1 + local cluster=one-pod + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/one-pod.yml + local size=1 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.yb04mOcCrJ ++ mktemp + local LAST_ERR=/tmp/tmp.Fu2IwrXaN7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.yb04mOcCrJ secret/my-cluster-secrets created secret/some-name-ssl created secret/some-name-ssl-internal created + cat /tmp/tmp.Fu2IwrXaN7 + rm /tmp/tmp.yb04mOcCrJ /tmp/tmp.Fu2IwrXaN7 + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/client.yml + local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/client.yml + local pvc_name= + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/client.yml '' + local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/client.yml + local pvc_name= + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/client.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' ++ mktemp + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + local LAST_OUT=/tmp/tmp.6dUZUmYzv6 + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2076-ee1be0ba#' + /usr/bin/sed -e 
's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.one-pod-31237~ + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' ++ mktemp + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + local LAST_ERR=/tmp/tmp.AMx5orasFv + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.6dUZUmYzv6 deployment.apps/pxc-client created + cat /tmp/tmp.AMx5orasFv + rm /tmp/tmp.6dUZUmYzv6 /tmp/tmp.AMx5orasFv + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/one-pod.yml + local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/one-pod.yml + local pvc_name= + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/one-pod.yml '' + local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/one-pod.yml + local pvc_name= + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/one-pod.yml + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.one-pod-31237~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2076-ee1be0ba#' ++ mktemp + local LAST_OUT=/tmp/tmp.doYL3zxD6W ++ mktemp + local LAST_ERR=/tmp/tmp.TOPeWFzbra + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.doYL3zxD6W perconaxtradbcluster.pxc.percona.com/one-pod created + cat /tmp/tmp.TOPeWFzbra + rm /tmp/tmp.doYL3zxD6W /tmp/tmp.TOPeWFzbra + return 0 + desc 'check if all 1 pods are started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 1 pods are started ----------------------------------------------------------------------------------- + wait_for_running one-pod-pxc 1 + local name=one-pod-pxc + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster 
----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod one-pod-pxc-0 480 + local pod=one-pod-pxc-0 + local max_retry=480 + local ns= ++ echo one-pod-pxc-0 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/one-pod-pxc-0 condition met waiting for pod/one-pod-pxc-0 to become Ready.Ok + sleep 15 + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' '-h one-pod-pxc -uroot -proot_password' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY);' + local 'uri=-h one-pod-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.OtGFTFxHfe +++ mktemp ++ local LAST_ERR=/tmp/tmp.AxOASBaHBy ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.OtGFTFxHfe ++ cat /tmp/tmp.AxOASBaHBy ++ rm /tmp/tmp.OtGFTFxHfe /tmp/tmp.AxOASBaHBy ++ return 0 + client_pod=pxc-client-857d976497-5957k + wait_pod pxc-client-857d976497-5957k + local pod=pxc-client-857d976497-5957k + local max_retry=480 + local ns= ++ echo pxc-client-857d976497-5957k ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-857d976497-5957k condition met waiting for pod/pxc-client-857d976497-5957k to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h one-pod-pxc -uroot -proot_password' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h one-pod-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.ymbqjeqKZZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.cBIN96agSr ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ymbqjeqKZZ ++ cat /tmp/tmp.cBIN96agSr ++ rm /tmp/tmp.ymbqjeqKZZ /tmp/tmp.cBIN96agSr ++ return 0 + client_pod=pxc-client-857d976497-5957k + wait_pod pxc-client-857d976497-5957k + local pod=pxc-client-857d976497-5957k + local max_retry=480 + local ns= ++ echo pxc-client-857d976497-5957k ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-857d976497-5957k condition met waiting for pod/pxc-client-857d976497-5957k to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace ++ seq 0 0 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h one-pod-pxc-0.one-pod-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h one-pod-pxc-0.one-pod-pxc -uroot -proot_password' + local postfix= 
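
Every kubectl call in this trace goes through the suite's kubectl_bin wrapper: a pair of mktemp files for stdout/stderr, a three-attempt loop (seq 0 2) that breaks on the first zero exit, then a cat of both capture files and cleanup. Below is a minimal sketch of that wrapper, inferred from the xtrace output rather than taken from the suite's actual functions file; the per-attempt sleep is an assumption based on the "sleep 0" lines in the failed namespace-delete retries above.

    # Minimal sketch of the retry wrapper seen throughout this trace.
    # Reconstructed from the xtrace output; not the suite's real implementation.
    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0 i
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do             # three attempts, as in the trace
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 ]; then
                sleep "$i"                  # assumption: short backoff ("sleep 0" in the trace)
                continue
            fi
            break
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR" >&2
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }

A still-failing final attempt ends the same way: the captured stderr is printed and the nonzero status is returned, which callers may deliberately swallow, as in the "+ return 1 + :" sequence for the one-pod-31237 namespace delete above.
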
+ local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 5\.7 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/select-1-57.sql ]] + run_mysql 'SELECT * from myApp.myApp;' '-h one-pod-pxc-0.one-pod-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h one-pod-pxc-0.one-pod-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.y3dy6BQ43z +++ mktemp ++ local LAST_ERR=/tmp/tmp.a3BnCb0QaR ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.y3dy6BQ43z ++ cat /tmp/tmp.a3BnCb0QaR ++ rm /tmp/tmp.y3dy6BQ43z /tmp/tmp.a3BnCb0QaR ++ return 0 + client_pod=pxc-client-857d976497-5957k + wait_pod pxc-client-857d976497-5957k + local pod=pxc-client-857d976497-5957k + local max_retry=480 + local ns= ++ echo pxc-client-857d976497-5957k ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-857d976497-5957k condition met waiting for pod/pxc-client-857d976497-5957k to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! -s /tmp/tmp.8lGSiE4sEU/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.8lGSiE4sEU/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/select-1.sql /tmp/tmp.8lGSiE4sEU/select-1.sql + compare_kubectl statefulset/one-pod-pxc + local resource=statefulset/one-pod-pxc + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/statefulset_one-pod-pxc.yml + local new_result=/tmp/tmp.8lGSiE4sEU/statefulset_one-pod-pxc.yml + desc 'compare statefulset/one-pod-pxc-' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/one-pod-pxc- ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/statefulset_one-pod-pxc-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.4 ]] + version_gt 1.33 ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/statefulset_one-pod-pxc-k129.yml ']' + version_gt 1.27 ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/statefulset_one-pod-pxc-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/statefulset_one-pod-pxc-k127.yml + '[' '!' 
-z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/statefulset_one-pod-pxc-k127-oc.yml ']' + version_gt 1.29 ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/statefulset_one-pod-pxc-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/statefulset_one-pod-pxc-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/statefulset_one-pod-pxc-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/one-pod-pxc + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. | select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. 
== "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("one-pod-31237", "namespace") | (.. | select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - ++ mktemp + local LAST_OUT=/tmp/tmp.XXaKz9QWEQ ++ mktemp + local LAST_ERR=/tmp/tmp.bc5Uad3A9c + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/one-pod-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.XXaKz9QWEQ + cat /tmp/tmp.bc5Uad3A9c + rm /tmp/tmp.XXaKz9QWEQ /tmp/tmp.bc5Uad3A9c + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/statefulset_one-pod-pxc-k127.yml /tmp/tmp.8lGSiE4sEU/statefulset_one-pod-pxc.yml + log 'compare_kubectl: statefulset/one-pod-pxc OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2025-12-04T12:11:01+0000]' compare_kubectl: statefulset/one-pod-pxc OK [2025-12-04T12:11:01+0000] compare_kubectl: statefulset/one-pod-pxc OK + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/config-secret.yaml + local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/config-secret.yaml + local pvc_name= + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/config-secret.yaml '' + local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/config-secret.yaml + local pvc_name= + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/config-secret.yaml ++ mktemp + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + local LAST_OUT=/tmp/tmp.0lyc2F6ed9 + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup#' ++ mktemp + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: #' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.one-pod-31237~ + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2076-ee1be0ba#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + local LAST_ERR=/tmp/tmp.sk0d7HUjVg + local exit_status=0 + 
/usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.0lyc2F6ed9 secret/one-pod-pxc created + cat /tmp/tmp.sk0d7HUjVg + rm /tmp/tmp.0lyc2F6ed9 /tmp/tmp.sk0d7HUjVg + return 0 + sleep 50 + compare_kubectl statefulset/one-pod-pxc -secret + local resource=statefulset/one-pod-pxc + local postfix=-secret + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/statefulset_one-pod-pxc-secret.yml + local new_result=/tmp/tmp.8lGSiE4sEU/statefulset_one-pod-pxc.yml + desc 'compare statefulset/one-pod-pxc--secret' + set +o xtrace ----------------------------------------------------------------------------------- compare statefulset/one-pod-pxc--secret ----------------------------------------------------------------------------------- + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/statefulset_one-pod-pxc-secret-eks.yml ']' + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.4 ]] + version_gt 1.33 ++ echo '1.31 >= 1.33' ++ bc -l + '[' 0 -eq 1 ']' + return 1 + version_gt 1.29 ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/statefulset_one-pod-pxc-secret-k129.yml ']' + version_gt 1.27 ++ echo '1.31 >= 1.27' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/statefulset_one-pod-pxc-secret-k127.yml ']' + expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/statefulset_one-pod-pxc-secret-k127.yml + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/statefulset_one-pod-pxc-secret-k127-oc.yml ']' + version_gt 1.29 ++ echo '1.31 >= 1.29' ++ bc -l + '[' 1 -eq 1 ']' + return 0 + '[' '!' -z '' -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/statefulset_one-pod-pxc-secret-k127-k129-oc.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/statefulset_one-pod-pxc-secret-k127-eks.yml ']' + '[' 0 = 1 -a -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/statefulset_one-pod-pxc-secret-k127-aks.yml ']' + kubectl_bin get -o yaml statefulset/one-pod-pxc ++ mktemp + yq eval ' del(.metadata.managedFields) | del(.. | select(has("creationTimestamp")).creationTimestamp) | del(.. | select(has("namespace")).namespace) | del(.. 
| select(has("uid")).uid) | del(.metadata.resourceVersion) | del(.spec.template.spec.containers[].env[] | select(.name == "CLUSTER_HASH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "BACKUP_PATH")) | del(.spec.template.spec.containers[].env[] | select(.name == "S3_BUCKET_URL")) | del(.spec.template.spec.containers[].env[] | select(.name == "AZURE_CONTAINER_NAME")) | del(.metadata.selfLink) | del(.metadata.deletionTimestamp) | del(.metadata.annotations."kubectl.kubernetes.io/last-applied-configuration") | del(.metadata.annotations."kubernetes.io/psp") | del(.metadata.annotations."batch.kubernetes.io/job-tracking") | del(.metadata.labels."batch.kubernetes.io/job-name") | del(.metadata.labels."job-name") | del(.metadata.annotations."cloud.google.com/neg") | del(.metadata.annotations."k8s.v1.cni.cncf.io*") | del(.metadata.annotations."k8s.ovn.org/pod-networks") | del(.spec.template.metadata.annotations."last-applied-secret") | del(.spec.template.metadata.labels."batch.kubernetes.io/job-name") | del(.spec.template.metadata.labels."job-name") | del(.. | select(has("batch.kubernetes.io/controller-uid"))."batch.kubernetes.io/controller-uid") | del(.. | select(has("image")).image) | del(.. | select(has("clusterIP")).clusterIP) | del(.. | select(has("clusterIPs")).clusterIPs) | del(.. | select(has("dataSource")).dataSource) | del(.. | select(has("procMount")).procMount) | del(.. | select(has("storageClassName")).storageClassName) | del(.. | select(has("finalizers")).finalizers) | del(.. | select(has("kubernetes.io/pvc-protection"))."kubernetes.io/pvc-protection") | del(.. | select(has("volumeName")).volumeName) | del(.. | select(has("volume.beta.kubernetes.io/storage-provisioner"))."volume.beta.kubernetes.io/storage-provisioner") | del(.. | select(has("volume.kubernetes.io/storage-provisioner"))."volume.kubernetes.io/storage-provisioner") | del(.spec.volumeMode) | del(.spec.nodeName) | del(.. | select(has("volume.kubernetes.io/selected-node"))."volume.kubernetes.io/selected-node") | del(.. | select(has("percona.com/last-config-hash"))."percona.com/last-config-hash") | del(.. | select(has("percona.com/configuration-hash"))."percona.com/configuration-hash") | del(.. | select(has("percona.com/env-secret-config-hash"))."percona.com/env-secret-config-hash") | del(.. | select(has("percona.com/ssl-hash"))."percona.com/ssl-hash") | del(.. | select(has("percona.com/ssl-internal-hash"))."percona.com/ssl-internal-hash") | del(.. | select(has("kubectl.kubernetes.io/default-container"))."kubectl.kubernetes.io/default-container") | del(.spec.volumeClaimTemplates[].spec.volumeMode | select(. == "Filesystem")) | del(.. | select(has("healthCheckNodePort")).healthCheckNodePort) | del(.. | select(has("nodePort")).nodePort) | del(.. | select(has("imagePullSecrets")).imagePullSecrets) | del(.. | select(has("enableServiceLinks")).enableServiceLinks) | del(.status) | del(.spec.volumeClaimTemplates[].apiVersion) | del(.spec.volumeClaimTemplates[].kind) | del(.metadata.ownerReferences[].apiVersion) | del(.. | select(has("controller-uid")).controller-uid) | del(.. | select(has("preemptionPolicy")).preemptionPolicy) | del(.spec.ipFamilies) | del(.spec.ipFamilyPolicy) | (.. | select(. == "policy/v1beta1")) = "policy/v1" | del(.. | select(has("kubernetes.io/hostname"))."kubernetes.io/hostname") | (.. | select(tag == "!!str")) |= sub("one-pod-31237", "namespace") | (.. 
| select(tag == "!!str")) |= sub("kube-api-access-.*", "kube-api-access") | del(.. | select(has("annotations")).annotations | select(length==0)) | del(.spec.crVersion) | del(.. | select(.[] == "percona-xtradb-cluster-operator-workload-token*"))' - + local LAST_OUT=/tmp/tmp.Z2RgX7Agmt ++ mktemp + local LAST_ERR=/tmp/tmp.YTKaYAMTlg + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get -o yaml statefulset/one-pod-pxc + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.Z2RgX7Agmt + cat /tmp/tmp.YTKaYAMTlg + rm /tmp/tmp.Z2RgX7Agmt /tmp/tmp.YTKaYAMTlg + return 0 + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/statefulset_one-pod-pxc-secret-k127.yml /tmp/tmp.8lGSiE4sEU/statefulset_one-pod-pxc.yml + log 'compare_kubectl: statefulset/one-pod-pxc OK' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2025-12-04T12:11:53+0000]' compare_kubectl: statefulset/one-pod-pxc OK [2025-12-04T12:11:53+0000] compare_kubectl: statefulset/one-pod-pxc OK + run_backup one-pod on-demand-backup-pvc + local cluster=one-pod + local backup1=on-demand-backup-pvc + desc 'make backup' + set +o xtrace ----------------------------------------------------------------------------------- make backup ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/on-demand-backup-pvc.yml ++ mktemp + local LAST_OUT=/tmp/tmp.9rAOpw7jzi ++ mktemp + local LAST_ERR=/tmp/tmp.thwYfUz9tG + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/on-demand-backup-pvc.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.9rAOpw7jzi perconaxtradbclusterbackup.pxc.percona.com/on-demand-backup-pvc created + cat /tmp/tmp.thwYfUz9tG + rm /tmp/tmp.9rAOpw7jzi /tmp/tmp.thwYfUz9tG + return 0 + wait_backup on-demand-backup-pvc + local backup=on-demand-backup-pvc + local status=Succeeded + set +o xtrace waiting for pxc-backup/on-demand-backup-pvc to reach Succeeded state....................................Succeeded + run_recovery_check one-pod on-demand-backup-pvc + local cluster=one-pod + local backup1=on-demand-backup-pvc + desc 'write data after backup' + set +o xtrace ----------------------------------------------------------------------------------- write data after backup ----------------------------------------------------------------------------------- + run_mysql 'INSERT myApp.myApp (id) VALUES (100501)' '-h one-pod-pxc -uroot -proot_password' + local 'command=INSERT myApp.myApp (id) VALUES (100501)' + local 'uri=-h one-pod-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mLIv1fTpFs +++ mktemp ++ local LAST_ERR=/tmp/tmp.wXz92r3zu6 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.mLIv1fTpFs ++ cat /tmp/tmp.wXz92r3zu6 ++ rm /tmp/tmp.mLIv1fTpFs /tmp/tmp.wXz92r3zu6 ++ return 0 + client_pod=pxc-client-857d976497-5957k + wait_pod pxc-client-857d976497-5957k + local pod=pxc-client-857d976497-5957k + local max_retry=480 + local ns= ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo 
pxc-client-857d976497-5957k + local container= + set +o xtrace pod/pxc-client-857d976497-5957k condition met waiting for pod/pxc-client-857d976497-5957k to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + compare_mysql_cmd select-2 'SELECT * from myApp.myApp;' '-h one-pod-pxc-0.one-pod-pxc -uroot -proot_password' + local command_id=select-2 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h one-pod-pxc-0.one-pod-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/select-2.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 5\.7 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/select-2-57.sql ]] + run_mysql 'SELECT * from myApp.myApp;' '-h one-pod-pxc-0.one-pod-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h one-pod-pxc-0.one-pod-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.zWqvg9K4Dv +++ mktemp ++ local LAST_ERR=/tmp/tmp.lXCyFaR0U5 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.zWqvg9K4Dv ++ cat /tmp/tmp.lXCyFaR0U5 ++ rm /tmp/tmp.zWqvg9K4Dv /tmp/tmp.lXCyFaR0U5 ++ return 0 + client_pod=pxc-client-857d976497-5957k + wait_pod pxc-client-857d976497-5957k + local pod=pxc-client-857d976497-5957k + local max_retry=480 + local ns= ++ echo pxc-client-857d976497-5957k ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-857d976497-5957k condition met waiting for pod/pxc-client-857d976497-5957k to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! 
-s /tmp/tmp.8lGSiE4sEU/select-2.sql ]]
++ grep 'Unknown MySQL server host' /tmp/tmp.8lGSiE4sEU/select-2.sql
+ [[ -n '' ]]
+ [[ 0 -eq 0 ]]
+ diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/select-2.sql /tmp/tmp.8lGSiE4sEU/select-2.sql
+ desc 'recover backup'
+ set +o xtrace
-----------------------------------------------------------------------------------
recover backup
-----------------------------------------------------------------------------------
+ kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/restore-on-demand-backup-pvc.yaml
++ mktemp
+ local LAST_OUT=/tmp/tmp.rO5mtZgQyF
++ mktemp
+ local LAST_ERR=/tmp/tmp.LOx4HS28w9
+ local exit_status=0
++ seq 0 2
+ for i in '$(seq 0 2)'
+ set +e
+ kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/restore-on-demand-backup-pvc.yaml
+ exit_status=0
+ set -e
+ '[' 0 '!=' 0 ']'
+ break
+ cat /tmp/tmp.rO5mtZgQyF
perconaxtradbclusterrestore.pxc.percona.com/on-demand-backup-pvc created
+ cat /tmp/tmp.LOx4HS28w9
+ rm /tmp/tmp.rO5mtZgQyF /tmp/tmp.LOx4HS28w9
+ return 0
+ wait_backup_restore on-demand-backup-pvc
+ local backup_name=on-demand-backup-pvc
+ local target_state=Succeeded
+ local wait_time=720
+ set +o xtrace
waiting for pxc-restore/on-demand-backup-pvc to reach Succeeded state
2025-12-04T12:13:15 pxc-restore/on-demand-backup-pvc state: Starting
2025-12-04T12:13:17 pxc-restore/on-demand-backup-pvc state: Starting
2025-12-04T12:13:19 pxc-restore/on-demand-backup-pvc state: Starting
2025-12-04T12:13:21 pxc-restore/on-demand-backup-pvc state: Starting
2025-12-04T12:13:23 pxc-restore/on-demand-backup-pvc state: Starting
2025-12-04T12:13:25 pxc-restore/on-demand-backup-pvc state: Stopping Cluster
2025-12-04T12:13:27 pxc-restore/on-demand-backup-pvc state: Stopping Cluster
2025-12-04T12:13:29 pxc-restore/on-demand-backup-pvc state: Stopping Cluster
2025-12-04T12:13:32 pxc-restore/on-demand-backup-pvc state: Stopping Cluster
2025-12-04T12:13:34 pxc-restore/on-demand-backup-pvc state: Stopping Cluster
2025-12-04T12:13:36 pxc-restore/on-demand-backup-pvc state: Stopping Cluster
2025-12-04T12:13:38 pxc-restore/on-demand-backup-pvc state: Stopping Cluster
2025-12-04T12:13:41 pxc-restore/on-demand-backup-pvc state: Stopping Cluster
2025-12-04T12:13:43 pxc-restore/on-demand-backup-pvc state: Stopping Cluster
2025-12-04T12:13:45 pxc-restore/on-demand-backup-pvc state: Stopping Cluster
2025-12-04T12:13:48 pxc-restore/on-demand-backup-pvc state: Stopping Cluster
2025-12-04T12:13:51 pxc-restore/on-demand-backup-pvc state: Stopping Cluster
2025-12-04T12:13:54 pxc-restore/on-demand-backup-pvc state: Restoring
2025-12-04T12:13:56 pxc-restore/on-demand-backup-pvc state: Restoring
2025-12-04T12:13:58 pxc-restore/on-demand-backup-pvc state: Restoring
2025-12-04T12:14:01 pxc-restore/on-demand-backup-pvc state: Restoring
2025-12-04T12:14:04 pxc-restore/on-demand-backup-pvc state: Restoring
2025-12-04T12:14:06 pxc-restore/on-demand-backup-pvc state: Restoring
2025-12-04T12:14:08 pxc-restore/on-demand-backup-pvc state: Restoring
2025-12-04T12:14:10 pxc-restore/on-demand-backup-pvc state: Restoring
2025-12-04T12:14:12 pxc-restore/on-demand-backup-pvc state: Restoring
2025-12-04T12:14:15 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:14:17 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:14:19 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:14:21 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:14:23 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:14:25 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:14:28 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:14:30 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:14:32 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:14:35 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:14:38 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:14:40 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:14:42 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:14:44 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:14:46 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:14:48 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:14:50 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:14:53 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:14:55 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:14:58 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:15:00 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:15:02 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:15:04 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:15:06 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:15:08 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:15:11 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:15:13 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:15:15 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:15:17 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:15:20 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:15:22 pxc-restore/on-demand-backup-pvc state: Preparing Cluster
2025-12-04T12:15:24 pxc-restore/on-demand-backup-pvc state: Starting Cluster
2025-12-04T12:15:27 pxc-restore/on-demand-backup-pvc state: Starting Cluster
2025-12-04T12:15:29 pxc-restore/on-demand-backup-pvc state: Starting Cluster
2025-12-04T12:15:31 pxc-restore/on-demand-backup-pvc state: Starting Cluster
2025-12-04T12:15:33 pxc-restore/on-demand-backup-pvc state: Starting Cluster
2025-12-04T12:15:36 pxc-restore/on-demand-backup-pvc state: Starting Cluster
2025-12-04T12:15:38 pxc-restore/on-demand-backup-pvc state: Starting Cluster
2025-12-04T12:15:40 pxc-restore/on-demand-backup-pvc state: Starting Cluster
2025-12-04T12:15:43 pxc-restore/on-demand-backup-pvc state: Starting Cluster
2025-12-04T12:15:45 pxc-restore/on-demand-backup-pvc state: Starting Cluster
2025-12-04T12:15:47 pxc-restore/on-demand-backup-pvc state: Starting Cluster
2025-12-04T12:15:49 pxc-restore/on-demand-backup-pvc state: Starting Cluster
2025-12-04T12:15:52 pxc-restore/on-demand-backup-pvc state: Starting Cluster
2025-12-04T12:15:54 pxc-restore/on-demand-backup-pvc state: Starting Cluster
2025-12-04T12:15:56 pxc-restore/on-demand-backup-pvc state: Starting Cluster
2025-12-04T12:15:58 pxc-restore/on-demand-backup-pvc state: Starting Cluster
2025-12-04T12:16:01 pxc-restore/on-demand-backup-pvc state: Starting Cluster
2025-12-04T12:16:03 pxc-restore/on-demand-backup-pvc state: Starting Cluster
2025-12-04T12:16:05
pxc-restore/on-demand-backup-pvc state: Succeeded + kubectl_bin logs job/restore-job-on-demand-backup-pvc-one-pod ++ mktemp + local LAST_OUT=/tmp/tmp.C2F08zlyUO ++ mktemp + local LAST_ERR=/tmp/tmp.2x7KvPhb3w + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl logs job/restore-job-on-demand-backup-pvc-one-pod + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.C2F08zlyUO + LIB_PATH=/opt/percona/backup/lib/pxc + . /opt/percona/backup/lib/pxc/check-version.sh + . /opt/percona/backup/lib/pxc/vault.sh ++ set -o errexit ++ keyring_vault=/etc/mysql/vault-keyring-secret/keyring_vault.conf + SOCAT_OPTS=TCP:restore-src-on-demand-backup-pvc-one-pod:3307,retry=30 + check_ssl + CA=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + '[' -f /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt ']' + SSL_DIR=/etc/mysql/ssl + '[' -f /etc/mysql/ssl/ca.crt ']' + CA=/etc/mysql/ssl/ca.crt + SSL_INTERNAL_DIR=/etc/mysql/ssl-internal + '[' -f /etc/mysql/ssl-internal/ca.crt ']' + CA=/etc/mysql/ssl-internal/ca.crt + KEY=/etc/mysql/ssl/tls.key + CERT=/etc/mysql/ssl/tls.crt + '[' -f /etc/mysql/ssl-internal/tls.key ']' + '[' -f /etc/mysql/ssl-internal/tls.crt ']' + KEY=/etc/mysql/ssl-internal/tls.key + CERT=/etc/mysql/ssl-internal/tls.crt + '[' -f /etc/mysql/ssl-internal/ca.crt ']' + '[' -f /etc/mysql/ssl-internal/tls.key ']' + '[' -f /etc/mysql/ssl-internal/tls.crt ']' + SOCAT_OPTS='openssl-connect:restore-src-on-demand-backup-pvc-one-pod:3307,reuseaddr,cert=/etc/mysql/ssl-internal/tls.crt,key=/etc/mysql/ssl-internal/tls.key,cafile=/etc/mysql/ssl-internal/ca.crt,verify=1,commonname='\'''\'',retry=30,no-sni=1' + ping -c1 restore-src-on-demand-backup-pvc-one-pod /opt/percona/backup/recovery-pvc-joiner.sh: line 40: ping: command not found + : + rm -rf /datadir/auth_plugin /datadir/auto.cnf /datadir/binlog.000001 /datadir/binlog.000002 /datadir/binlog.000003 /datadir/binlog.000004 /datadir/binlog.000005 /datadir/binlog.index /datadir/galera.cache /datadir/get-pxc-state /datadir/grastate.dat /datadir/ib_buffer_pool /datadir/ib_logfile0 /datadir/ib_logfile1 /datadir/ibdata1 /datadir/innobackup.backup.log /datadir/liveness-check.sh /datadir/myApp /datadir/mysql /datadir/mysql-state-monitor /datadir/peer-list /datadir/performance_schema /datadir/pmm-prerun.sh /datadir/prepare_restored_cluster.sh /datadir/private_key.pem /datadir/public_key.pem /datadir/pxc-configure-pxc.sh /datadir/pxc-entrypoint.sh /datadir/readiness-check.sh /datadir/sys /datadir/version_info /datadir/wsrep_cmd_notify_handler.sh ++ mktemp --directory /datadir/pxc_sst_XXXX + tmp=/datadir/pxc_sst_N5Jp + socat -u 'openssl-connect:restore-src-on-demand-backup-pvc-one-pod:3307,reuseaddr,cert=/etc/mysql/ssl-internal/tls.crt,key=/etc/mysql/ssl-internal/tls.key,cafile=/etc/mysql/ssl-internal/ca.crt,verify=1,commonname='\'''\'',retry=30,no-sni=1' stdio ++ get_xtrabackup_version +++ xtrabackup --version +++ grep '^xtrabackup version' +++ sed 's/-.*//' +++ awk '{print $3}' ++ xtrabackup_ver=2.4.29 ++ echo 2.4.29 + XTRABACKUP_VERSION=2.4.29 + check_for_version 2.4.29 8.0.0 + '[' -z 2.4.29 ']' + '[' -z 8.0.0 ']' + local local_version_str + local required_version_str ++ normalize_version 2.4.29 ++ local major=0 ++ local minor=0 ++ local patch=0 ++ [[ 2.4.29 =~ ^([0-9]+)\.([0-9]+)\.?([0-9]*)([^ ])* ]] ++ major=2 ++ minor=4 ++ patch=29 ++ printf %02d%02d%02d 2 4 29 + local_version_str=020429 ++ normalize_version 8.0.0 ++ local major=0 ++ local minor=0 ++ local patch=0 ++ [[ 8.0.0 =~ ^([0-9]+)\.([0-9]+)\.?([0-9]*)([^ ])* ]] 
++ major=8 ++ minor=0 ++ patch=0 ++ printf %02d%02d%02d 8 0 0 + required_version_str=080000 + [[ 020429 < 080000 ]] + return 1 + socat -u 'openssl-connect:restore-src-on-demand-backup-pvc-one-pod:3307,reuseaddr,cert=/etc/mysql/ssl-internal/tls.crt,key=/etc/mysql/ssl-internal/tls.key,cafile=/etc/mysql/ssl-internal/ca.crt,verify=1,commonname='\'''\'',retry=30,no-sni=1' stdio ++ grep -c processor /proc/cpuinfo + xbstream -x -C /datadir/pxc_sst_N5Jp --parallel=4 + PXB_VAULT_PREPARE_ARGS= + PXB_VAULT_MOVEBACK_ARGS= + VAULT_CONFIG_FILE=/etc/mysql/vault-keyring-secret/keyring_vault.conf + VAULT_KEYRING_COMPONENT=/opt/percona/component_keyring_vault.cnf + [[ -f /etc/mysql/vault-keyring-secret/keyring_vault.conf ]] + set +o xtrace vault configuration not found xtrabackup: recognized server arguments: --parallel=4 xtrabackup: recognized client arguments: 251204 12:14:05 innobackupex: Starting the decrypt and decompress operation IMPORTANT: Please check that the decrypt and decompress run completes successfully. At the end of a successful decrypt and decompress run innobackupex prints "completed OK!". innobackupex version 2.4.29 based on MySQL server 5.7.44 Linux (x86_64) (revision id: 2e6c0951) 251204 12:14:05 completed OK! + xtrabackup --use-memory=100MB --prepare --xtrabackup-plugin-dir=/usr/lib64/xtrabackup/plugin --target-dir=/datadir/pxc_sst_N5Jp xtrabackup: recognized server arguments: --innodb_checksum_algorithm=crc32 --innodb_log_checksum_algorithm=strict_crc32 --innodb_data_file_path=ibdata1:12M:autoextend --innodb_log_files_in_group=2 --innodb_log_file_size=50331648 --innodb_fast_checksum=0 --innodb_page_size=16384 --innodb_log_block_size=512 --innodb_undo_directory=./ --innodb_undo_tablespaces=0 --server-id=23550830 --redo-log-version=1 --xtrabackup-plugin-dir=/usr/lib64/xtrabackup/plugin xtrabackup: recognized client arguments: --use-memory=100MB --prepare=1 --target-dir=/datadir/pxc_sst_N5Jp xtrabackup version 2.4.29 based on MySQL server 5.7.44 Linux (x86_64) (revision id: 2e6c0951) xtrabackup: cd to /datadir/pxc_sst_N5Jp/ xtrabackup: This target seems to be not prepared yet. InnoDB: Number of pools: 1 xtrabackup: xtrabackup_logfile detected: size=8388608, start_lsn=(12198286) xtrabackup: using the following InnoDB configuration for recovery: xtrabackup: innodb_data_home_dir = . xtrabackup: innodb_data_file_path = ibdata1:12M:autoextend xtrabackup: innodb_log_group_home_dir = . xtrabackup: innodb_log_files_in_group = 1 xtrabackup: innodb_log_file_size = 8388608 xtrabackup: using the following InnoDB configuration for recovery: xtrabackup: innodb_data_home_dir = . xtrabackup: innodb_data_file_path = ibdata1:12M:autoextend xtrabackup: innodb_log_group_home_dir = . xtrabackup: innodb_log_files_in_group = 1 xtrabackup: innodb_log_file_size = 8388608 xtrabackup: Starting InnoDB instance for recovery. xtrabackup: Using 104857600 bytes for buffer pool (set by --use-memory parameter) InnoDB: PUNCH HOLE support available InnoDB: Mutexes and rw_locks use GCC atomic builtins InnoDB: Uses event mutexes InnoDB: GCC builtin __atomic_thread_fence() is used for memory barrier InnoDB: Compressed tables use zlib 1.2.13 InnoDB: Number of pools: 1 InnoDB: Using CPU crc32 instructions InnoDB: Initializing buffer pool, total size = 100M, instances = 1, chunk size = 100M InnoDB: Completed initialization of buffer pool InnoDB: If the mysqld execution user is authorized, page cleaner thread priority can be changed. See the man page of setpriority(). InnoDB: Highest supported file format is Barracuda. 
InnoDB: Log scan progressed past the checkpoint lsn 12198286 InnoDB: Doing recovery: scanned up to log sequence number 12198324 (0%) InnoDB: Database was not shutdown normally! InnoDB: Starting crash recovery. InnoDB: xtrabackup: Last MySQL binlog file position 194, file name binlog.000004 InnoDB: Creating shared tablespace for temporary tables InnoDB: Setting file './ibtmp1' size to 12 MB. Physically writing the file full; Please wait ... InnoDB: File './ibtmp1' size is now 12 MB. InnoDB: 96 redo rollback segment(s) found. 1 redo rollback segment(s) are active. InnoDB: 32 non-redo rollback segment(s) are active. InnoDB: 5.7.44 started; log sequence number 12198324 InnoDB: xtrabackup: Last MySQL binlog file position 194, file name binlog.000004 xtrabackup: Recovered WSREP position: 1e254a4a-d10a-11f0-9b4f-ff83542ea6cc:28 xtrabackup: starting shutdown with innodb_fast_shutdown = 1 InnoDB: FTS optimize thread exiting. InnoDB: Starting shutdown... InnoDB: Shutdown completed; log sequence number 12198343 InnoDB: Number of pools: 1 xtrabackup: using the following InnoDB configuration for recovery: xtrabackup: innodb_data_home_dir = . xtrabackup: innodb_data_file_path = ibdata1:12M:autoextend xtrabackup: innodb_log_group_home_dir = . xtrabackup: innodb_log_files_in_group = 2 xtrabackup: innodb_log_file_size = 50331648 InnoDB: PUNCH HOLE support available InnoDB: Mutexes and rw_locks use GCC atomic builtins InnoDB: Uses event mutexes InnoDB: GCC builtin __atomic_thread_fence() is used for memory barrier InnoDB: Compressed tables use zlib 1.2.13 InnoDB: Number of pools: 1 InnoDB: Using CPU crc32 instructions InnoDB: Initializing buffer pool, total size = 100M, instances = 1, chunk size = 100M InnoDB: Completed initialization of buffer pool InnoDB: If the mysqld execution user is authorized, page cleaner thread priority can be changed. See the man page of setpriority(). InnoDB: Setting log file ./ib_logfile101 size to 48 MB InnoDB: Setting log file ./ib_logfile1 size to 48 MB InnoDB: Renaming log file ./ib_logfile101 to ./ib_logfile0 InnoDB: New log files created, LSN=12198343 InnoDB: Highest supported file format is Barracuda. InnoDB: Log scan progressed past the checkpoint lsn 12198412 InnoDB: Doing recovery: scanned up to log sequence number 12198421 (0%) InnoDB: Database was not shutdown normally! InnoDB: Starting crash recovery. InnoDB: xtrabackup: Last MySQL binlog file position 194, file name binlog.000004 InnoDB: Removed temporary tablespace data file: "ibtmp1" InnoDB: Creating shared tablespace for temporary tables InnoDB: Setting file './ibtmp1' size to 12 MB. Physically writing the file full; Please wait ... InnoDB: File './ibtmp1' size is now 12 MB. InnoDB: 96 redo rollback segment(s) found. 1 redo rollback segment(s) are active. InnoDB: 32 non-redo rollback segment(s) are active. InnoDB: 5.7.44 started; log sequence number 12198421 xtrabackup: starting shutdown with innodb_fast_shutdown = 1 InnoDB: FTS optimize thread exiting. InnoDB: Starting shutdown... InnoDB: Shutdown completed; log sequence number 12198440 251204 12:14:09 completed OK! 
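
For reference, the version gate traced earlier (normalize_version, then check_for_version) can be reconstructed from the xtrace into a runnable form. The regex and the two-digit printf padding are exactly as traced; the function scaffolding around them is inferred, not the verbatim library source:

    # Reconstructed from the xtrace: pad major/minor/patch to two digits each
    # so a plain lexicographic string comparison orders versions correctly.
    normalize_version() {
        local major=0 minor=0 patch=0
        if [[ $1 =~ ^([0-9]+)\.([0-9]+)\.?([0-9]*)([^ ])* ]]; then
            major=${BASH_REMATCH[1]}
            minor=${BASH_REMATCH[2]}
            patch=${BASH_REMATCH[3]:-0}
        fi
        printf '%02d%02d%02d' "$major" "$minor" "$patch"
    }

    # Succeeds only when the local version is at least the required one.
    # Here 2.4.29 -> 020429 and 8.0.0 -> 080000, so check_for_version returns 1
    # and the 8.0-only restore paths are skipped, matching the trace above.
    check_for_version() {
        [[ -z $1 || -z $2 ]] && return 1
        local local_version_str required_version_str
        local_version_str=$(normalize_version "$1")
        required_version_str=$(normalize_version "$2")
        if [[ $local_version_str < $required_version_str ]]; then
            return 1
        fi
        return 0
    }

This is why the job proceeds down the 5.7/2.4 code path: XTRABACKUP_VERSION resolved to 2.4.29, below the 8.0.0 threshold.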
+ xtrabackup --defaults-group=mysqld --datadir=/datadir --move-back --force-non-empty-directories --xtrabackup-plugin-dir=/usr/lib64/xtrabackup/plugin --target-dir=/datadir/pxc_sst_N5Jp xtrabackup: recognized server arguments: --defaults_group=mysqld --datadir=/datadir --xtrabackup-plugin-dir=/usr/lib64/xtrabackup/plugin xtrabackup: recognized client arguments: --move-back=1 --force-non-empty-directories=1 --target-dir=/datadir/pxc_sst_N5Jp xtrabackup version 2.4.29 based on MySQL server 5.7.44 Linux (x86_64) (revision id: 2e6c0951) 251204 12:14:09 [01] Moving ib_logfile0 to /datadir/ib_logfile0 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ib_logfile1 to /datadir/ib_logfile1 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ibdata1 to /datadir/ibdata1 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./ib_buffer_pool to /datadir/ib_buffer_pool 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./xtrabackup_binlog_pos_innodb to /datadir/xtrabackup_binlog_pos_innodb 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./xtrabackup_master_key_id to /datadir/xtrabackup_master_key_id 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sst_info to /datadir/sst_info 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./xtrabackup_info to /datadir/xtrabackup_info 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./myApp/myApp.ibd to /datadir/myApp/myApp.ibd 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./myApp/myApp.frm to /datadir/myApp/myApp.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./myApp/db.opt to /datadir/myApp/db.opt 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/general_log.frm to /datadir/mysql/general_log.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/innodb_table_stats.frm to /datadir/mysql/innodb_table_stats.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/event.frm to /datadir/mysql/event.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/general_log.CSV to /datadir/mysql/general_log.CSV 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/help_relation.ibd to /datadir/mysql/help_relation.ibd 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/help_topic.frm to /datadir/mysql/help_topic.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/time_zone_transition.frm to /datadir/mysql/time_zone_transition.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/columns_priv.frm to /datadir/mysql/columns_priv.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/columns_priv.MYI to /datadir/mysql/columns_priv.MYI 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/time_zone_transition_type.frm to /datadir/mysql/time_zone_transition_type.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/event.MYI to /datadir/mysql/event.MYI 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/engine_cost.frm to /datadir/mysql/engine_cost.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/user.frm to /datadir/mysql/user.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/slow_log.CSV to /datadir/mysql/slow_log.CSV 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/innodb_index_stats.frm to /datadir/mysql/innodb_index_stats.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/slave_relay_log_info.frm to /datadir/mysql/slave_relay_log_info.frm 251204 12:14:09 [01] ...done 
251204 12:14:09 [01] Moving ./mysql/db.MYI to /datadir/mysql/db.MYI 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/proc.MYI to /datadir/mysql/proc.MYI 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/tables_priv.MYD to /datadir/mysql/tables_priv.MYD 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/slave_master_info.ibd to /datadir/mysql/slave_master_info.ibd 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/func.frm to /datadir/mysql/func.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/ndb_binlog_index.MYI to /datadir/mysql/ndb_binlog_index.MYI 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/innodb_table_stats.ibd to /datadir/mysql/innodb_table_stats.ibd 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/help_keyword.frm to /datadir/mysql/help_keyword.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/time_zone_name.ibd to /datadir/mysql/time_zone_name.ibd 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/proxies_priv.MYD to /datadir/mysql/proxies_priv.MYD 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/tables_priv.MYI to /datadir/mysql/tables_priv.MYI 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/proc.MYD to /datadir/mysql/proc.MYD 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/server_cost.ibd to /datadir/mysql/server_cost.ibd 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/procs_priv.MYD to /datadir/mysql/procs_priv.MYD 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/proc.frm to /datadir/mysql/proc.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/tables_priv.frm to /datadir/mysql/tables_priv.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/slave_master_info.frm to /datadir/mysql/slave_master_info.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/procs_priv.MYI to /datadir/mysql/procs_priv.MYI 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/slow_log.frm to /datadir/mysql/slow_log.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/time_zone.frm to /datadir/mysql/time_zone.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/time_zone_leap_second.ibd to /datadir/mysql/time_zone_leap_second.ibd 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/time_zone_leap_second.frm to /datadir/mysql/time_zone_leap_second.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/user.MYD to /datadir/mysql/user.MYD 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/servers.frm to /datadir/mysql/servers.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/time_zone_name.frm to /datadir/mysql/time_zone_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/time_zone.ibd to /datadir/mysql/time_zone.ibd 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/time_zone_transition_type.ibd to /datadir/mysql/time_zone_transition_type.ibd 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/gtid_executed.frm to /datadir/mysql/gtid_executed.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/db.MYD to /datadir/mysql/db.MYD 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/ndb_binlog_index.MYD to /datadir/mysql/ndb_binlog_index.MYD 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/ndb_binlog_index.frm to 
/datadir/mysql/ndb_binlog_index.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/engine_cost.ibd to /datadir/mysql/engine_cost.ibd 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/servers.ibd to /datadir/mysql/servers.ibd 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/help_category.ibd to /datadir/mysql/help_category.ibd 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/help_topic.ibd to /datadir/mysql/help_topic.ibd 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/gtid_executed.ibd to /datadir/mysql/gtid_executed.ibd 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/slave_worker_info.ibd to /datadir/mysql/slave_worker_info.ibd 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/server_cost.frm to /datadir/mysql/server_cost.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/slow_log.CSM to /datadir/mysql/slow_log.CSM 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/func.MYD to /datadir/mysql/func.MYD 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/procs_priv.frm to /datadir/mysql/procs_priv.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/func.MYI to /datadir/mysql/func.MYI 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/db.opt to /datadir/mysql/db.opt 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/help_relation.frm to /datadir/mysql/help_relation.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/plugin.frm to /datadir/mysql/plugin.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/help_keyword.ibd to /datadir/mysql/help_keyword.ibd 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/innodb_index_stats.ibd to /datadir/mysql/innodb_index_stats.ibd 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/slave_worker_info.frm to /datadir/mysql/slave_worker_info.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/proxies_priv.MYI to /datadir/mysql/proxies_priv.MYI 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/help_category.frm to /datadir/mysql/help_category.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/user.MYI to /datadir/mysql/user.MYI 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/columns_priv.MYD to /datadir/mysql/columns_priv.MYD 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/db.frm to /datadir/mysql/db.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/general_log.CSM to /datadir/mysql/general_log.CSM 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/plugin.ibd to /datadir/mysql/plugin.ibd 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/time_zone_transition.ibd to /datadir/mysql/time_zone_transition.ibd 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/event.MYD to /datadir/mysql/event.MYD 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/slave_relay_log_info.ibd to /datadir/mysql/slave_relay_log_info.ibd 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./mysql/proxies_priv.frm to /datadir/mysql/proxies_priv.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/schema_redundant_indexes.frm to /datadir/sys/schema_redundant_indexes.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024schema_index_statistics.frm to /datadir/sys/x@0024schema_index_statistics.frm 251204 12:14:09 [01] ...done 251204 12:14:09 
[01] Moving ./sys/x@0024innodb_buffer_stats_by_table.frm to /datadir/sys/x@0024innodb_buffer_stats_by_table.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/io_global_by_wait_by_latency.frm to /datadir/sys/io_global_by_wait_by_latency.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/host_summary_by_stages.frm to /datadir/sys/host_summary_by_stages.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/schema_table_statistics_with_buffer.frm to /datadir/sys/schema_table_statistics_with_buffer.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/innodb_lock_waits.frm to /datadir/sys/innodb_lock_waits.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/innodb_buffer_stats_by_table.frm to /datadir/sys/innodb_buffer_stats_by_table.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/statements_with_temp_tables.frm to /datadir/sys/statements_with_temp_tables.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/user_summary.frm to /datadir/sys/user_summary.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/host_summary_by_statement_type.frm to /datadir/sys/host_summary_by_statement_type.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/memory_global_total.frm to /datadir/sys/memory_global_total.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024waits_by_user_by_latency.frm to /datadir/sys/x@0024waits_by_user_by_latency.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/user_summary_by_statement_latency.frm to /datadir/sys/user_summary_by_statement_latency.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/version.frm to /datadir/sys/version.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024memory_global_total.frm to /datadir/sys/x@0024memory_global_total.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/memory_by_user_by_current_bytes.frm to /datadir/sys/memory_by_user_by_current_bytes.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024statements_with_sorting.frm to /datadir/sys/x@0024statements_with_sorting.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024user_summary_by_stages.frm to /datadir/sys/x@0024user_summary_by_stages.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/session.frm to /datadir/sys/session.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/statements_with_sorting.frm to /datadir/sys/statements_with_sorting.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024user_summary_by_statement_latency.frm to /datadir/sys/x@0024user_summary_by_statement_latency.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024host_summary_by_file_io.frm to /datadir/sys/x@0024host_summary_by_file_io.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024user_summary_by_file_io.frm to /datadir/sys/x@0024user_summary_by_file_io.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/host_summary.frm to /datadir/sys/host_summary.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024schema_table_statistics_with_buffer.frm to /datadir/sys/x@0024schema_table_statistics_with_buffer.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/schema_index_statistics.frm to /datadir/sys/schema_index_statistics.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving 
./sys/memory_by_thread_by_current_bytes.frm to /datadir/sys/memory_by_thread_by_current_bytes.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024host_summary_by_statement_type.frm to /datadir/sys/x@0024host_summary_by_statement_type.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024innodb_buffer_stats_by_schema.frm to /datadir/sys/x@0024innodb_buffer_stats_by_schema.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/host_summary_by_file_io.frm to /datadir/sys/host_summary_by_file_io.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/statements_with_full_table_scans.frm to /datadir/sys/statements_with_full_table_scans.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/sys_config.ibd to /datadir/sys/sys_config.ibd 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024io_global_by_file_by_latency.frm to /datadir/sys/x@0024io_global_by_file_by_latency.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024waits_by_host_by_latency.frm to /datadir/sys/x@0024waits_by_host_by_latency.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024memory_by_host_by_current_bytes.frm to /datadir/sys/x@0024memory_by_host_by_current_bytes.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/sys_config.TRG to /datadir/sys/sys_config.TRG 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/io_global_by_file_by_latency.frm to /datadir/sys/io_global_by_file_by_latency.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/io_global_by_file_by_bytes.frm to /datadir/sys/io_global_by_file_by_bytes.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024ps_digest_95th_percentile_by_avg_us.frm to /datadir/sys/x@0024ps_digest_95th_percentile_by_avg_us.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/statements_with_runtimes_in_95th_percentile.frm to /datadir/sys/statements_with_runtimes_in_95th_percentile.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024host_summary.frm to /datadir/sys/x@0024host_summary.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/io_by_thread_by_latency.frm to /datadir/sys/io_by_thread_by_latency.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024memory_by_thread_by_current_bytes.frm to /datadir/sys/x@0024memory_by_thread_by_current_bytes.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/io_global_by_wait_by_bytes.frm to /datadir/sys/io_global_by_wait_by_bytes.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024user_summary_by_file_io_type.frm to /datadir/sys/x@0024user_summary_by_file_io_type.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024waits_global_by_latency.frm to /datadir/sys/x@0024waits_global_by_latency.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024schema_table_statistics.frm to /datadir/sys/x@0024schema_table_statistics.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/memory_by_host_by_current_bytes.frm to /datadir/sys/memory_by_host_by_current_bytes.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/user_summary_by_file_io_type.frm to /datadir/sys/user_summary_by_file_io_type.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/sys_config.frm to /datadir/sys/sys_config.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024memory_by_user_by_current_bytes.frm 
to /datadir/sys/x@0024memory_by_user_by_current_bytes.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024memory_global_by_current_bytes.frm to /datadir/sys/x@0024memory_global_by_current_bytes.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/schema_table_lock_waits.frm to /datadir/sys/schema_table_lock_waits.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/user_summary_by_file_io.frm to /datadir/sys/user_summary_by_file_io.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024session.frm to /datadir/sys/x@0024session.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/wait_classes_global_by_latency.frm to /datadir/sys/wait_classes_global_by_latency.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024io_by_thread_by_latency.frm to /datadir/sys/x@0024io_by_thread_by_latency.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024io_global_by_wait_by_bytes.frm to /datadir/sys/x@0024io_global_by_wait_by_bytes.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/waits_by_host_by_latency.frm to /datadir/sys/waits_by_host_by_latency.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/schema_tables_with_full_table_scans.frm to /datadir/sys/schema_tables_with_full_table_scans.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024statements_with_runtimes_in_95th_percentile.frm to /datadir/sys/x@0024statements_with_runtimes_in_95th_percentile.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024host_summary_by_statement_latency.frm to /datadir/sys/x@0024host_summary_by_statement_latency.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/wait_classes_global_by_avg_latency.frm to /datadir/sys/wait_classes_global_by_avg_latency.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/session_ssl_status.frm to /datadir/sys/session_ssl_status.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024schema_flattened_keys.frm to /datadir/sys/x@0024schema_flattened_keys.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/schema_table_statistics.frm to /datadir/sys/schema_table_statistics.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/memory_global_by_current_bytes.frm to /datadir/sys/memory_global_by_current_bytes.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/ps_check_lost_instrumentation.frm to /datadir/sys/ps_check_lost_instrumentation.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/user_summary_by_stages.frm to /datadir/sys/user_summary_by_stages.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/latest_file_io.frm to /datadir/sys/latest_file_io.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/statements_with_errors_or_warnings.frm to /datadir/sys/statements_with_errors_or_warnings.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024latest_file_io.frm to /datadir/sys/x@0024latest_file_io.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024host_summary_by_file_io_type.frm to /datadir/sys/x@0024host_summary_by_file_io_type.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024ps_schema_table_statistics_io.frm to /datadir/sys/x@0024ps_schema_table_statistics_io.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024io_global_by_file_by_bytes.frm to 
/datadir/sys/x@0024io_global_by_file_by_bytes.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024wait_classes_global_by_latency.frm to /datadir/sys/x@0024wait_classes_global_by_latency.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/waits_global_by_latency.frm to /datadir/sys/waits_global_by_latency.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024schema_tables_with_full_table_scans.frm to /datadir/sys/x@0024schema_tables_with_full_table_scans.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024statements_with_errors_or_warnings.frm to /datadir/sys/x@0024statements_with_errors_or_warnings.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024io_global_by_wait_by_latency.frm to /datadir/sys/x@0024io_global_by_wait_by_latency.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024user_summary_by_statement_type.frm to /datadir/sys/x@0024user_summary_by_statement_type.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/schema_object_overview.frm to /datadir/sys/schema_object_overview.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024processlist.frm to /datadir/sys/x@0024processlist.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/db.opt to /datadir/sys/db.opt 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024statements_with_temp_tables.frm to /datadir/sys/x@0024statements_with_temp_tables.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/user_summary_by_statement_type.frm to /datadir/sys/user_summary_by_statement_type.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024statements_with_full_table_scans.frm to /datadir/sys/x@0024statements_with_full_table_scans.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/statement_analysis.frm to /datadir/sys/statement_analysis.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024ps_digest_avg_latency_distribution.frm to /datadir/sys/x@0024ps_digest_avg_latency_distribution.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024schema_table_lock_waits.frm to /datadir/sys/x@0024schema_table_lock_waits.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/processlist.frm to /datadir/sys/processlist.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024statement_analysis.frm to /datadir/sys/x@0024statement_analysis.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/metrics.frm to /datadir/sys/metrics.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/waits_by_user_by_latency.frm to /datadir/sys/waits_by_user_by_latency.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024user_summary.frm to /datadir/sys/x@0024user_summary.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/schema_unused_indexes.frm to /datadir/sys/schema_unused_indexes.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/sys_config_update_set_user.TRN to /datadir/sys/sys_config_update_set_user.TRN 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/schema_auto_increment_columns.frm to /datadir/sys/schema_auto_increment_columns.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/host_summary_by_statement_latency.frm to /datadir/sys/host_summary_by_statement_latency.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/host_summary_by_file_io_type.frm to 
/datadir/sys/host_summary_by_file_io_type.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024wait_classes_global_by_avg_latency.frm to /datadir/sys/x@0024wait_classes_global_by_avg_latency.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/innodb_buffer_stats_by_schema.frm to /datadir/sys/innodb_buffer_stats_by_schema.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/sys_config_insert_set_user.TRN to /datadir/sys/sys_config_insert_set_user.TRN 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024innodb_lock_waits.frm to /datadir/sys/x@0024innodb_lock_waits.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./sys/x@0024host_summary_by_stages.frm to /datadir/sys/x@0024host_summary_by_stages.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_transactions_summary_by_host_by_event_name.frm to /datadir/performance_schema/events_transactions_summary_by_host_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_stages_summary_by_host_by_event_name.frm to /datadir/performance_schema/events_stages_summary_by_host_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/replication_connection_status.frm to /datadir/performance_schema/replication_connection_status.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/setup_actors.frm to /datadir/performance_schema/setup_actors.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/variables_by_thread.frm to /datadir/performance_schema/variables_by_thread.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_stages_history.frm to /datadir/performance_schema/events_stages_history.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_waits_summary_by_thread_by_event_name.frm to /datadir/performance_schema/events_waits_summary_by_thread_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/replication_group_members.frm to /datadir/performance_schema/replication_group_members.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/session_variables.frm to /datadir/performance_schema/session_variables.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_waits_summary_by_user_by_event_name.frm to /datadir/performance_schema/events_waits_summary_by_user_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/replication_connection_configuration.frm to /datadir/performance_schema/replication_connection_configuration.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_statements_summary_global_by_event_name.frm to /datadir/performance_schema/events_statements_summary_global_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/global_variables.frm to /datadir/performance_schema/global_variables.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_transactions_history.frm to /datadir/performance_schema/events_transactions_history.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_statements_history.frm to /datadir/performance_schema/events_statements_history.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving 
./performance_schema/events_statements_summary_by_program.frm to /datadir/performance_schema/events_statements_summary_by_program.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/status_by_account.frm to /datadir/performance_schema/status_by_account.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/metadata_locks.frm to /datadir/performance_schema/metadata_locks.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_transactions_summary_by_user_by_event_name.frm to /datadir/performance_schema/events_transactions_summary_by_user_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_stages_summary_global_by_event_name.frm to /datadir/performance_schema/events_stages_summary_global_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/threads.frm to /datadir/performance_schema/threads.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/status_by_host.frm to /datadir/performance_schema/status_by_host.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/session_connect_attrs.frm to /datadir/performance_schema/session_connect_attrs.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/users.frm to /datadir/performance_schema/users.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/replication_applier_configuration.frm to /datadir/performance_schema/replication_applier_configuration.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/memory_summary_by_thread_by_event_name.frm to /datadir/performance_schema/memory_summary_by_thread_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/table_handles.frm to /datadir/performance_schema/table_handles.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_statements_summary_by_account_by_event_name.frm to /datadir/performance_schema/events_statements_summary_by_account_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_stages_summary_by_thread_by_event_name.frm to /datadir/performance_schema/events_stages_summary_by_thread_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/replication_applier_status_by_worker.frm to /datadir/performance_schema/replication_applier_status_by_worker.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/session_status.frm to /datadir/performance_schema/session_status.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/objects_summary_global_by_type.frm to /datadir/performance_schema/objects_summary_global_by_type.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/pxc_cluster_view.frm to /datadir/performance_schema/pxc_cluster_view.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_statements_summary_by_user_by_event_name.frm to /datadir/performance_schema/events_statements_summary_by_user_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_statements_current.frm to /datadir/performance_schema/events_statements_current.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/replication_applier_status_by_coordinator.frm to 
/datadir/performance_schema/replication_applier_status_by_coordinator.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_waits_summary_by_instance.frm to /datadir/performance_schema/events_waits_summary_by_instance.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/memory_summary_by_account_by_event_name.frm to /datadir/performance_schema/memory_summary_by_account_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/memory_summary_by_host_by_event_name.frm to /datadir/performance_schema/memory_summary_by_host_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/performance_timers.frm to /datadir/performance_schema/performance_timers.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_transactions_history_long.frm to /datadir/performance_schema/events_transactions_history_long.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_waits_summary_by_host_by_event_name.frm to /datadir/performance_schema/events_waits_summary_by_host_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/rwlock_instances.frm to /datadir/performance_schema/rwlock_instances.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/session_account_connect_attrs.frm to /datadir/performance_schema/session_account_connect_attrs.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_waits_history_long.frm to /datadir/performance_schema/events_waits_history_long.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/status_by_thread.frm to /datadir/performance_schema/status_by_thread.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_waits_history.frm to /datadir/performance_schema/events_waits_history.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_statements_history_long.frm to /datadir/performance_schema/events_statements_history_long.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/socket_summary_by_instance.frm to /datadir/performance_schema/socket_summary_by_instance.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/cond_instances.frm to /datadir/performance_schema/cond_instances.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/table_io_waits_summary_by_index_usage.frm to /datadir/performance_schema/table_io_waits_summary_by_index_usage.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/global_status.frm to /datadir/performance_schema/global_status.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/mutex_instances.frm to /datadir/performance_schema/mutex_instances.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_waits_summary_by_account_by_event_name.frm to /datadir/performance_schema/events_waits_summary_by_account_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_stages_summary_by_user_by_event_name.frm to /datadir/performance_schema/events_stages_summary_by_user_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/prepared_statements_instances.frm to 
/datadir/performance_schema/prepared_statements_instances.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_statements_summary_by_digest.frm to /datadir/performance_schema/events_statements_summary_by_digest.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_transactions_summary_by_account_by_event_name.frm to /datadir/performance_schema/events_transactions_summary_by_account_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/socket_summary_by_event_name.frm to /datadir/performance_schema/socket_summary_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/setup_objects.frm to /datadir/performance_schema/setup_objects.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_waits_current.frm to /datadir/performance_schema/events_waits_current.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/file_summary_by_instance.frm to /datadir/performance_schema/file_summary_by_instance.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_transactions_summary_global_by_event_name.frm to /datadir/performance_schema/events_transactions_summary_global_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/file_instances.frm to /datadir/performance_schema/file_instances.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/setup_consumers.frm to /datadir/performance_schema/setup_consumers.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_stages_history_long.frm to /datadir/performance_schema/events_stages_history_long.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_statements_summary_by_host_by_event_name.frm to /datadir/performance_schema/events_statements_summary_by_host_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/replication_applier_status.frm to /datadir/performance_schema/replication_applier_status.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/socket_instances.frm to /datadir/performance_schema/socket_instances.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_stages_current.frm to /datadir/performance_schema/events_stages_current.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/db.opt to /datadir/performance_schema/db.opt 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_stages_summary_by_account_by_event_name.frm to /datadir/performance_schema/events_stages_summary_by_account_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_waits_summary_global_by_event_name.frm to /datadir/performance_schema/events_waits_summary_global_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/file_summary_by_event_name.frm to /datadir/performance_schema/file_summary_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/accounts.frm to /datadir/performance_schema/accounts.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/host_cache.frm to /datadir/performance_schema/host_cache.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving 
./performance_schema/events_statements_summary_by_thread_by_event_name.frm to /datadir/performance_schema/events_statements_summary_by_thread_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/status_by_user.frm to /datadir/performance_schema/status_by_user.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_transactions_summary_by_thread_by_event_name.frm to /datadir/performance_schema/events_transactions_summary_by_thread_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/hosts.frm to /datadir/performance_schema/hosts.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/table_lock_waits_summary_by_table.frm to /datadir/performance_schema/table_lock_waits_summary_by_table.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/processlist.frm to /datadir/performance_schema/processlist.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/setup_timers.frm to /datadir/performance_schema/setup_timers.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/memory_summary_by_user_by_event_name.frm to /datadir/performance_schema/memory_summary_by_user_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/user_variables_by_thread.frm to /datadir/performance_schema/user_variables_by_thread.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/setup_instruments.frm to /datadir/performance_schema/setup_instruments.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/memory_summary_global_by_event_name.frm to /datadir/performance_schema/memory_summary_global_by_event_name.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/events_transactions_current.frm to /datadir/performance_schema/events_transactions_current.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/table_io_waits_summary_by_table.frm to /datadir/performance_schema/table_io_waits_summary_by_table.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./performance_schema/replication_group_member_stats.frm to /datadir/performance_schema/replication_group_member_stats.frm 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./ibtmp1 to /datadir/ibtmp1 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./xtrabackup_galera_info to /datadir/xtrabackup_galera_info 251204 12:14:09 [01] ...done 251204 12:14:09 [01] Moving ./binlog.000005 to /datadir/binlog.000005 251204 12:14:09 [01] ...done 251204 12:14:09 completed OK! 
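
Stripped of the per-file noise, the restore job above boils down to three phases. A condensed sketch with the same flags as the trace (SOCAT_OPTS is the openssl-connect string assembled earlier in the job, and the pxc_sst_* temp directory name differs per run):

    # 1. Stream the backup from the restore-src service over TLS and unpack
    #    it in parallel, one xbstream worker per CPU (4 in this run).
    tmp=$(mktemp --directory /datadir/pxc_sst_XXXX)
    socat -u "$SOCAT_OPTS" stdio \
        | xbstream -x -C "$tmp" --parallel="$(grep -c processor /proc/cpuinfo)"

    # 2. Replay the redo log captured during the backup so the files are
    #    transactionally consistent; in the trace this is preceded by a
    #    decrypt/decompress pass, each step ending in "completed OK!".
    xtrabackup --use-memory=100MB --prepare \
        --xtrabackup-plugin-dir=/usr/lib64/xtrabackup/plugin \
        --target-dir="$tmp"

    # 3. Move the prepared files into the live datadir, which the job
    #    emptied with rm -rf at the start.
    xtrabackup --defaults-group=mysqld --datadir=/datadir --move-back \
        --force-non-empty-directories \
        --xtrabackup-plugin-dir=/usr/lib64/xtrabackup/plugin \
        --target-dir="$tmp"

--move-back (rather than --copy-back) is what produces the long "Moving ... done" listing above: files are renamed into place instead of duplicated, which matters on a PVC with limited headroom.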
+ cat /tmp/tmp.2x7KvPhb3w Defaulted container "xtrabackup" out of: xtrabackup, backup-init (init) + rm /tmp/tmp.C2F08zlyUO /tmp/tmp.2x7KvPhb3w + return 0 + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/restore-on-demand-backup-pvc.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.sstFV3Ml1V ++ mktemp + local LAST_ERR=/tmp/tmp.dGh4bsdrTc + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/restore-on-demand-backup-pvc.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.sstFV3Ml1V perconaxtradbclusterrestore.pxc.percona.com "on-demand-backup-pvc" deleted from one-pod-31237 namespace + cat /tmp/tmp.dGh4bsdrTc + rm /tmp/tmp.sstFV3Ml1V /tmp/tmp.dGh4bsdrTc + return 0 + wait_for_running one-pod-pxc 1 + local name=one-pod-pxc + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod one-pod-pxc-0 480 + local pod=one-pod-pxc-0 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' ++ echo one-pod-pxc-0 + local container=pxc + set +o xtrace pod/one-pod-pxc-0 condition met waiting for pod/one-pod-pxc-0 to become Ready.Ok + desc 'check data after backup' + set +o xtrace ----------------------------------------------------------------------------------- check data after backup ----------------------------------------------------------------------------------- + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h one-pod-pxc-0.one-pod-pxc -uroot -proot_password' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h one-pod-pxc-0.one-pod-pxc -uroot -proot_password' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 5\.7 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/select-1-57.sql ]] + run_mysql 'SELECT * from myApp.myApp;' '-h one-pod-pxc-0.one-pod-pxc -uroot -proot_password' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h one-pod-pxc-0.one-pod-pxc -uroot -proot_password' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.H5Xhcbfvvm +++ mktemp ++ local LAST_ERR=/tmp/tmp.don9jKJ9h2 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.H5Xhcbfvvm ++ cat /tmp/tmp.don9jKJ9h2 ++ rm /tmp/tmp.H5Xhcbfvvm /tmp/tmp.don9jKJ9h2 ++ return 0 + client_pod=pxc-client-857d976497-5957k + wait_pod pxc-client-857d976497-5957k + local pod=pxc-client-857d976497-5957k + local max_retry=480 + local ns= ++ echo pxc-client-857d976497-5957k ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-857d976497-5957k 
condition met waiting for pod/pxc-client-857d976497-5957k to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + [[ ! -s /tmp/tmp.8lGSiE4sEU/select-1.sql ]] ++ grep 'Unknown MySQL server host' /tmp/tmp.8lGSiE4sEU/select-1.sql + [[ -n '' ]] + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/compare/select-1.sql /tmp/tmp.8lGSiE4sEU/select-1.sql + check_pvc_md5 on-demand-backup-pvc + local backup_name=on-demand-backup-pvc + desc 'check backup file md5sum' + set +o xtrace ----------------------------------------------------------------------------------- check backup file md5sum ----------------------------------------------------------------------------------- ++ get_pvc_name_for_backup on-demand-backup-pvc ++ local backup_name=on-demand-backup-pvc +++ kubectl_bin get pxc-backup on-demand-backup-pvc -o 'jsonpath={.status.destination}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.2ihmJ8Ulvd ++++ mktemp +++ local LAST_ERR=/tmp/tmp.QwxZxwzvx2 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc-backup on-demand-backup-pvc -o 'jsonpath={.status.destination}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.2ihmJ8Ulvd +++ cat /tmp/tmp.QwxZxwzvx2 +++ rm /tmp/tmp.2ihmJ8Ulvd /tmp/tmp.QwxZxwzvx2 +++ return 0 ++ local destination=pvc/xb-on-demand-backup-pvc-20251204121155-18c20096 ++ [[ -z pvc/xb-on-demand-backup-pvc-20251204121155-18c20096 ]] ++ local pvc_name=xb-on-demand-backup-pvc-20251204121155-18c20096 ++ echo xb-on-demand-backup-pvc-20251204121155-18c20096 + pvc_name=xb-on-demand-backup-pvc-20251204121155-18c20096 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/client.yml xb-on-demand-backup-pvc-20251204121155-18c20096 + local config_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/client.yml + local pvc_name=xb-on-demand-backup-pvc-20251204121155-18c20096 + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/client.yml xb-on-demand-backup-pvc-20251204121155-18c20096 + local input_file=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/client.yml + local pvc_name=xb-on-demand-backup-pvc-20251204121155-18c20096 + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' ++ mktemp + local LAST_OUT=/tmp/tmp.06kJCfivmV + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2076-ee1be0ba#' + /usr/bin/sed -e 's#claimName:..*-backup-pvc$#claimName: xb-on-demand-backup-pvc-20251204121155-18c20096#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/client.yml + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/fluentbit:main-logcollector#' + /usr/bin/sed -e 
s~minio-service.#namespace~minio-service.one-pod-31237~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' ++ mktemp + local LAST_ERR=/tmp/tmp.StMNxH3Lsq + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.06kJCfivmV deployment.apps/backup-client created + cat /tmp/tmp.StMNxH3Lsq + rm /tmp/tmp.06kJCfivmV /tmp/tmp.StMNxH3Lsq + return 0 + sleep 10 ++ kubectl_bin get pods --selector=name=backup-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.chHZxuhLMw +++ mktemp ++ local LAST_ERR=/tmp/tmp.GGTYEb2nfd ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=backup-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.chHZxuhLMw ++ cat /tmp/tmp.GGTYEb2nfd ++ rm /tmp/tmp.chHZxuhLMw /tmp/tmp.GGTYEb2nfd ++ return 0 + bak_client_pod=backup-client-dc9c5bddd-5mhlj + wait_pod backup-client-dc9c5bddd-5mhlj + local pod=backup-client-dc9c5bddd-5mhlj + local max_retry=480 + local ns= ++ echo backup-client-dc9c5bddd-5mhlj ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/backup-client-dc9c5bddd-5mhlj condition met waiting for pod/backup-client-dc9c5bddd-5mhlj to become Ready.Ok + kubectl_bin exec backup-client-dc9c5bddd-5mhlj -- bash -c 'cd /backup; md5sum -c md5sum.txt' ++ mktemp + local LAST_OUT=/tmp/tmp.cs5DptPAke ++ mktemp + local LAST_ERR=/tmp/tmp.f6VIIFBzRy + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl exec backup-client-dc9c5bddd-5mhlj -- bash -c 'cd /backup; md5sum -c md5sum.txt' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.cs5DptPAke xtrabackup.stream: OK + cat /tmp/tmp.f6VIIFBzRy + rm /tmp/tmp.cs5DptPAke /tmp/tmp.f6VIIFBzRy + return 0 + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.uw4YEc85Sa ++ mktemp + local LAST_ERR=/tmp/tmp.Y0J23PArrs + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.uw4YEc85Sa deployment.apps "backup-client" deleted from one-pod-31237 namespace + cat /tmp/tmp.Y0J23PArrs + rm /tmp/tmp.uw4YEc85Sa /tmp/tmp.Y0J23PArrs + return 0 + '[' -z '' ']' + run_backup one-pod on-demand-backup-aws-s3 + local cluster=one-pod + local backup1=on-demand-backup-aws-s3 + desc 'make backup' + set +o xtrace ----------------------------------------------------------------------------------- make backup ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/on-demand-backup-aws-s3.yml ++ mktemp + local LAST_OUT=/tmp/tmp.zSWsdRrnrZ ++ mktemp + local LAST_ERR=/tmp/tmp.9mwsY5OJU4 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/one-pod/conf/on-demand-backup-aws-s3.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.zSWsdRrnrZ perconaxtradbclusterbackup.pxc.percona.com/on-demand-backup-aws-s3 created + cat /tmp/tmp.9mwsY5OJU4 + rm /tmp/tmp.zSWsdRrnrZ /tmp/tmp.9mwsY5OJU4 + return 0 + wait_backup on-demand-backup-aws-s3 + local 
backup=on-demand-backup-aws-s3 + local status=Succeeded + set +o xtrace waiting for pxc-backup/on-demand-backup-aws-s3 to reach Succeeded state........................................................................................................................................................................................................................................................................................................................................................................2025-12-04T12:08:39.609Z INFO setup Runs on {"platform": "kubernetes", "version": "v1.31.14-gke.1033000"} 2025-12-04T12:08:39.609Z INFO setup Manager starting up {"gitCommit": "ee1be0bad2174aa25222aa09ad9903b8a418344c", "gitBranch": "PR-2076-ee1be0ba", "buildTime": "2025-12-04T10:59:38Z", "goVersion": "go1.25.5", "os": "linux", "arch": "amd64"} 2025-12-04T12:08:39.613Z INFO setup Registering Components. 2025-12-04T12:08:39.790Z INFO controller-runtime.webhook Registering webhook {"path": "/validate-percona-xtradbcluster"} 2025-12-04T12:08:39.790Z INFO setup Starting the Cmd. 2025-12-04T12:08:39.791Z INFO controller-runtime.metrics Starting metrics server 2025-12-04T12:08:39.791Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":8080", "secure": false} 2025-12-04T12:08:39.791Z INFO starting server {"name": "health probe", "addr": "[::]:8081"} 2025-12-04T12:08:39.791Z INFO controller-runtime.webhook Starting webhook server 2025-12-04T12:08:39.791Z INFO controller-runtime.certwatcher Updated current TLS certificate {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key"} 2025-12-04T12:08:39.791Z INFO controller-runtime.webhook Serving webhook server {"host": "", "port": 9443} 2025-12-04T12:08:39.792Z INFO controller-runtime.certwatcher Starting certificate poll+watcher {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key", "interval": "10s"} 2025-12-04T12:08:39.892Z INFO attempting to acquire leader lease pxc-operator/08db1feb.percona.com... 
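The run of dots above ends with the operator's own startup log being dumped, which suggests the wait exhausted its retry budget and the harness printed the operator log for debugging before failing. For reference, a polling helper of this shape can be sketched in a few lines of bash; the function body, the jsonpath field, and the 600-attempt budget below are illustrative assumptions, not code copied from e2e-tests/functions:

wait_backup() {
    # Poll a pxc-backup object until it reaches the desired state,
    # printing one dot per attempt (the run of dots seen above).
    local backup=$1
    local target=${2:-Succeeded}
    local retry=0
    echo -n "waiting for pxc-backup/${backup} to reach ${target} state"
    until [ "$(kubectl get pxc-backup "${backup}" -o jsonpath='{.status.state}' 2>/dev/null)" = "${target}" ]; do
        sleep 1
        echo -n .
        retry=$((retry + 1))
        if [ "${retry}" -ge 600 ]; then
            echo "pxc-backup/${backup} did not reach ${target} state" >&2
            # Dump the operator log for debugging; the deployment name is assumed.
            kubectl logs deployment/percona-xtradb-cluster-operator -n pxc-operator >&2
            return 1
        fi
    done
    echo .Ok
}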
2025-12-04T12:08:39.927Z INFO successfully acquired lease pxc-operator/08db1feb.percona.com 2025-12-04T12:08:39.927Z DEBUG events percona-xtradb-cluster-operator-84658c6c47-9rq72_26b43ece-e50e-440b-abbc-c0f843553c6b became leader {"type": "Normal", "object": {"kind":"Lease","namespace":"pxc-operator","name":"08db1feb.percona.com","uid":"ad94d3e4-f232-4bf9-be0c-461feec421b9","apiVersion":"coordination.k8s.io/v1","resourceVersion":"1764850119917199009"}, "reason": "LeaderElection"} 2025-12-04T12:08:39.927Z INFO Starting EventSource {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "source": "kind source: *v1.Secret"} 2025-12-04T12:08:39.927Z INFO Starting EventSource {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "source": "kind source: *v1.PerconaXtraDBClusterRestore"} 2025-12-04T12:08:39.927Z INFO Starting EventSource {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "source": "kind source: *v1.PerconaXtraDBCluster"} 2025-12-04T12:08:39.927Z INFO Starting EventSource {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "source": "kind source: *v1.PerconaXtraDBClusterBackup"} 2025-12-04T12:08:40.028Z INFO Starting Controller {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster"} 2025-12-04T12:08:40.028Z INFO Starting workers {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "worker count": 1} 2025-12-04T12:08:40.028Z INFO Starting Controller {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore"} 2025-12-04T12:08:40.028Z INFO Starting workers {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "worker count": 1} 2025-12-04T12:08:40.128Z INFO Starting Controller {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup"} 2025-12-04T12:08:40.128Z INFO Starting workers {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "worker count": 1} 2025-12-04T12:09:17.407Z INFO Set CR version {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"one-pod","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "one-pod", "reconcileID": "1da50077-4ffa-4043-b933-048b788afaef", "version": "1.19.0"} 2025-12-04T12:09:19.025Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"one-pod","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "one-pod", "reconcileID": "1da50077-4ffa-4043-b933-048b788afaef", "object": "one-pod-pxc", "kind": "&TypeMeta{Kind:ConfigMap,APIVersion:v1,}"} 2025-12-04T12:09:19.144Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"one-pod","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "one-pod", "reconcileID": "1da50077-4ffa-4043-b933-048b788afaef", 
"object": "one-pod-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"} 2025-12-04T12:09:19.181Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"one-pod","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "one-pod", "reconcileID": "1da50077-4ffa-4043-b933-048b788afaef", "object": "one-pod-pxc", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2025-12-04T12:09:19.226Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"one-pod","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "one-pod", "reconcileID": "1da50077-4ffa-4043-b933-048b788afaef", "object": "one-pod-pxc-unready", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2025-12-04T12:09:19.982Z DEBUG Creating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"one-pod","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "one-pod", "reconcileID": "65eb96c6-0beb-4a14-a12c-a1c4cb0b4697", "object": "one-pod-pxc", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2025-12-04T12:10:36.802Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"one-pod","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "one-pod", "reconcileID": "710ae9dd-cde9-42d0-89f1-2d5f2cc05556", "user": "root"} 2025-12-04T12:10:36.864Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"one-pod","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "one-pod", "reconcileID": "710ae9dd-cde9-42d0-89f1-2d5f2cc05556", "user": "operator"} 2025-12-04T12:10:36.971Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"one-pod","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "one-pod", "reconcileID": "710ae9dd-cde9-42d0-89f1-2d5f2cc05556", "user": "monitor"} 2025-12-04T12:10:37.122Z INFO User monitor: granted privileges {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"one-pod","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "one-pod", "reconcileID": "710ae9dd-cde9-42d0-89f1-2d5f2cc05556"} 2025-12-04T12:10:37.252Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"one-pod","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "one-pod", "reconcileID": "710ae9dd-cde9-42d0-89f1-2d5f2cc05556", "user": "xtrabackup"} 2025-12-04T12:10:37.525Z INFO User xtrabackup: granted privileges {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"one-pod","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "one-pod", "reconcileID": "710ae9dd-cde9-42d0-89f1-2d5f2cc05556"} 
2025-12-04T12:10:37.632Z INFO Password expiration policy updated {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"one-pod","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "one-pod", "reconcileID": "710ae9dd-cde9-42d0-89f1-2d5f2cc05556", "user": "replication"} 2025-12-04T12:10:37.752Z INFO update PXC version (fetched from db) {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"one-pod","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "one-pod", "reconcileID": "710ae9dd-cde9-42d0-89f1-2d5f2cc05556", "new version": "5.7.44-48-57-log"} 2025-12-04T12:11:03.016Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"one-pod","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "one-pod", "reconcileID": "2de4d8d7-ebe5-4838-8ea7-037444338e28", "object": "one-pod-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} &v1.StatefulSet{ TypeMeta: {Kind: "StatefulSet", APIVersion: "apps/v1"}, ObjectMeta: v1.ObjectMeta{ ... // 2 identical fields Namespace: "one-pod-31237", SelfLink: "", - UID: "28cc85de-e6f0-49c0-85af-551a7a5b04a1", + UID: "", - ResourceVersion: "1764850232100031012", + ResourceVersion: "", - Generation: 1, + Generation: 0, - CreationTimestamp: v1.Time{Time: s"2025-12-04 12:09:19 +0000 UTC"}, + CreationTimestamp: v1.Time{}, DeletionTimestamp: nil, DeletionGracePeriodSeconds: nil, Labels: nil, - Annotations: map[string]string{ - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6MSwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJvbmUtcG9kIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJvbmUtcG9kIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6ImFlYTFhODBmN2IwOGI5OTNjYzNkNDcyYWYzZjI0MDFjIiwicGVyY29uYS5jb20vc3NsLWhhc2giOiJhNzM5OTUzYTBiZDJkODlmOTg5MTE4YjYwZDY3ZmI1MCIsInBlcmNvbmEuY29tL3Nz"..., - }, + Annotations: map[string]string{ + "percona.com/last-config-hash": 
"eyJyZXBsaWNhcyI6MSwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJvbmUtcG9kIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn19LCJ0ZW1wbGF0ZSI6eyJtZXRhZGF0YSI6eyJsYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJvbmUtcG9kIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJuZXRlcy5pby9uYW1lIjoicGVyY29uYS14dHJhZGItY2x1c3RlciIsImFwcC5rdWJlcm5ldGVzLmlvL3BhcnQtb2YiOiJwZXJjb25hLXh0cmFkYi1jbHVzdGVyIn0sImFubm90YXRpb25zIjp7Imt1YmVjdGwua3ViZXJuZXRlcy5pby9kZWZhdWx0LWNvbnRhaW5lciI6InB4YyIsInBlcmNvbmEuY29tL2NvbmZpZ3VyYXRpb24taGFzaCI6IjcyY2I0YzBkMTFhNjU2ZGQxMDE4MTdiMTcxZDAxOWNhIiwicGVyY29uYS5jb20vc3NsLWhhc2giOiJhNzM5OTUzYTBiZDJkODlmOTg5MTE4YjYwZDY3ZmI1MCIsInBlcmNvbmEuY29tL3Nz"..., + }, OwnerReferences: {{APIVersion: "pxc.percona.com/v1", Kind: "PerconaXtraDBCluster", Name: "one-pod", UID: "9f03cda0-47c2-488b-9348-57cb4002c7cc", ...}}, Finalizers: nil, - ManagedFields: []v1.ManagedFieldsEntry{ - { - Manager: "percona-xtradb-cluster-operator", - Operation: "Update", - APIVersion: "apps/v1", - Time: s"2025-12-04 12:09:19 +0000 UTC", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:metadata":{"f:annotations":{".":{},"f:percona.com/last-confi`..., - }, - { - Manager: "kube-controller-manager", - Operation: "Update", - APIVersion: "apps/v1", - Time: s"2025-12-04 12:10:32 +0000 UTC", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:status":{"f:availableReplicas":{},"f:collisionCount":{},"f:c`..., - Subresource: "status", - }, - }, + ManagedFields: nil, }, Spec: v1.StatefulSetSpec{ Replicas: &1, Selector: &{MatchLabels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "one-pod", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, Template: v1.PodTemplateSpec{ ObjectMeta: v1.ObjectMeta{ ... // 9 identical fields DeletionGracePeriodSeconds: nil, Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "one-pod", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: map[string]string{ "kubectl.kubernetes.io/default-container": "pxc", "percona.com/configuration-hash": strings.Join({ - "aea1a80f7b08b993cc3d472af3f2401c", + "72cb4c0d11a656dd101817b171d019ca", }, ""), "percona.com/ssl-hash": "a739953a0bd2d89f989118b60d67fb50", "percona.com/ssl-internal-hash": "3c8a7fd9ebee90e24c57504d70c9e9ce", }, OwnerReferences: nil, Finalizers: nil, ManagedFields: nil, }, Spec: v1.PodSpec{ Volumes: []v1.Volume{ {Name: "tmp", VolumeSource: {EmptyDir: &{}}}, { Name: "config", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, - Secret: nil, + Secret: s"&SecretVolumeSource{SecretName:one-pod-pxc,Items:[]KeyToPath{},DefaultMode:nil,Optional:*false,}", NFS: nil, ISCSI: nil, ... // 8 identical fields FC: nil, AzureFile: nil, - ConfigMap: s"&ConfigMapVolumeSource{LocalObjectReference:LocalObjectReference{Name:one-pod-pxc,},Items:[]KeyToPath{},DefaultMode:*420,Optional:*true,}", + ConfigMap: nil, VsphereVolume: nil, Quobyte: nil, ... // 9 identical fields }, }, { Name: "ssl-internal", VolumeSource: v1.VolumeSource{ ... 
// 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-ssl-internal", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "ssl", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-ssl", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &false, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "auto-config", VolumeSource: v1.VolumeSource{ ... // 16 identical fields FC: nil, AzureFile: nil, ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: {Name: "auto-one-pod-pxc"}, Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, VsphereVolume: nil, Quobyte: nil, ... // 9 identical fields }, }, { Name: "vault-keyring-secret", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-vault", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "mysql-users-secret-file", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "internal-one-pod", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &false, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "mysql-init-file", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-mysql-init", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, }, InitContainers: []v1.Container{ { ... // 16 identical fields StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, }, Containers: []v1.Container{ { ... // 3 identical fields Args: {"mysqld"}, WorkingDir: "", Ports: []v1.ContainerPort{ { Name: "mysql", HostPort: 0, ContainerPort: 3306, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "sst", HostPort: 0, ContainerPort: 4444, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "write-set", HostPort: 0, ContainerPort: 4567, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "ist", HostPort: 0, ContainerPort: 4568, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysql-admin", HostPort: 0, ContainerPort: 33062, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysqlx", HostPort: 0, ContainerPort: 33060, - Protocol: "TCP", + Protocol: "", HostIP: "", }, }, EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "one-pod-env-vars-pxc"}, Optional: &true}}}, Env: {{Name: "PXC_SERVICE", Value: "one-pod-pxc-unready"}, {Name: "MONITOR_HOST", Value: "%"}, {Name: "MYSQL_ROOT_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-one-pod"}, Key: "root"}}}, {Name: "XTRABACKUP_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-one-pod"}, Key: "xtrabackup"}}}, ...}, ... 
// 4 identical fields VolumeMounts: {{Name: "datadir", MountPath: "/var/lib/mysql"}, {Name: "config", MountPath: "/etc/percona-xtradb-cluster.conf.d"}, {Name: "tmp", MountPath: "/tmp"}, {Name: "ssl", MountPath: "/etc/mysql/ssl"}, ...}, VolumeDevices: nil, LivenessProbe: &v1.Probe{ ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/liveness-check.sh"}}}, InitialDelaySeconds: 300, TimeoutSeconds: 5, - PeriodSeconds: 10, + PeriodSeconds: 0, SuccessThreshold: 1, FailureThreshold: 3, TerminationGracePeriodSeconds: nil, }, ReadinessProbe: &{ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/readiness-check.sh"}}}, InitialDelaySeconds: 15, TimeoutSeconds: 15, PeriodSeconds: 30, ...}, StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, }, EphemeralContainers: nil, - RestartPolicy: "Always", + RestartPolicy: "", TerminationGracePeriodSeconds: &600, ActiveDeadlineSeconds: nil, - DNSPolicy: "ClusterFirst", + DNSPolicy: "", NodeSelector: nil, ServiceAccountName: "default", - DeprecatedServiceAccount: "default", + DeprecatedServiceAccount: "", AutomountServiceAccountToken: nil, NodeName: "", ... // 7 identical fields Subdomain: "", Affinity: &{PodAntiAffinity: &{RequiredDuringSchedulingIgnoredDuringExecution: {{LabelSelector: &{MatchLabels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "one-pod", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, TopologyKey: "kubernetes.io/hostname"}}}}, - SchedulerName: "default-scheduler", + SchedulerName: "", Tolerations: nil, HostAliases: nil, ... // 6 identical fields PreemptionPolicy: nil, Overhead: nil, - TopologySpreadConstraints: nil, + TopologySpreadConstraints: []v1.TopologySpreadConstraint{}, SetHostnameAsFQDN: nil, OS: nil, ... // 5 identical fields }, }, VolumeClaimTemplates: []v1.PersistentVolumeClaim{ { TypeMeta: {}, ObjectMeta: {Name: "datadir", Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "one-pod", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, Spec: v1.PersistentVolumeClaimSpec{ ... // 3 identical fields VolumeName: "", StorageClassName: nil, - VolumeMode: &"Filesystem", + VolumeMode: nil, DataSource: nil, DataSourceRef: nil, VolumeAttributesClassName: nil, }, Status: v1.PersistentVolumeClaimStatus{ - Phase: "Pending", + Phase: "", AccessModes: nil, Capacity: nil, ... 
// 5 identical fields }, }, }, ServiceName: "one-pod-pxc", - PodManagementPolicy: "OrderedReady", + PodManagementPolicy: "", UpdateStrategy: {Type: "RollingUpdate", RollingUpdate: &{Partition: &0}}, - RevisionHistoryLimit: &10, + RevisionHistoryLimit: nil, MinReadySeconds: 0, - PersistentVolumeClaimRetentionPolicy: s"&StatefulSetPersistentVolumeClaimRetentionPolicy{WhenDeleted:Retain,WhenScaled:Retain,}", + PersistentVolumeClaimRetentionPolicy: nil, Ordinals: nil, }, Status: v1.StatefulSetStatus{ - ObservedGeneration: 1, + ObservedGeneration: 0, - Replicas: 1, + Replicas: 0, - ReadyReplicas: 1, + ReadyReplicas: 0, - CurrentReplicas: 1, + CurrentReplicas: 0, - UpdatedReplicas: 1, + UpdatedReplicas: 0, - CurrentRevision: "one-pod-pxc-64d8f4ff7b", + CurrentRevision: "", - UpdateRevision: "one-pod-pxc-64d8f4ff7b", + UpdateRevision: "", - CollisionCount: &0, + CollisionCount: nil, Conditions: nil, - AvailableReplicas: 1, + AvailableReplicas: 0, }, } 2025-12-04T12:11:03.072Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"one-pod","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "one-pod", "reconcileID": "2de4d8d7-ebe5-4838-8ea7-037444338e28", "object": "one-pod-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} 2025-12-04T12:11:55.388Z INFO Cluster is not ready for backup {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "b0841a0e-2296-4d97-8c4e-78e69ddd76b8", "cluster": "one-pod", "reason": "unsafe.backupIfUnhealthy must be true to run backup on cluster with status initializing"} 2025-12-04T12:12:00.389Z INFO Cluster is not ready for backup {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "454ee1e8-9121-4c91-80db-027f47d96b7e", "cluster": "one-pod", "reason": "unsafe.backupIfUnhealthy must be true to run backup on cluster with status initializing"} 2025-12-04T12:12:05.389Z INFO Cluster is not ready for backup {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "19b5dab9-396c-4102-b984-d39cc305dd20", "cluster": "one-pod", "reason": "unsafe.backupIfUnhealthy must be true to run backup on cluster with status initializing"} 2025-12-04T12:12:10.389Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "7519ff40-fe24-4c47-a35a-a6b4cef7da8e", "cluster": "one-pod", "storage": "pvc", "allowed": true} 2025-12-04T12:12:10.389Z INFO Creating a new volume for backup {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "7519ff40-fe24-4c47-a35a-a6b4cef7da8e", "Namespace": "one-pod-31237", "Name": "xb-on-demand-backup-pvc-20251204121155-18c20096"} 2025-12-04T12:12:10.476Z INFO Created a new backup job {"controller": "pxcbackup-controller",
"controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "7519ff40-fe24-4c47-a35a-a6b4cef7da8e", "namespace": "one-pod-31237", "name": "xb-on-demand-backup-pvc"} 2025-12-04T12:12:15.477Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "85cfec9a-cb90-4945-85f6-010ca9c4d301", "cluster": "one-pod", "storage": "pvc", "allowed": true} 2025-12-04T12:12:15.544Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "ffb93495-072c-43b1-a1d9-6ad974acbcfe", "cluster": "one-pod", "storage": "pvc", "allowed": true} 2025-12-04T12:12:20.533Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "69c6371f-d93d-4d48-8f03-a09956b44c6d", "cluster": "one-pod", "storage": "pvc", "allowed": true} 2025-12-04T12:12:25.573Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "9175bdd0-8069-4445-b117-9283ebbb36eb", "cluster": "one-pod", "storage": "pvc", "allowed": true} 2025-12-04T12:12:30.628Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "22cc94f2-5f02-4801-bd61-4cff2cd43ca2", "cluster": "one-pod", "storage": "pvc", "allowed": true} 2025-12-04T12:12:30.792Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "8753f390-4b56-4656-9485-8af03b6d8ab7", "cluster": "one-pod", "storage": "pvc", "allowed": true} 2025-12-04T12:12:35.762Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "093d7685-15b9-4ebb-84eb-7107a84893a5", "cluster": "one-pod", "storage": "pvc", 
"allowed": true} 2025-12-04T12:12:40.811Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "2d5af0ed-2f59-4d6d-ae52-d65d183e1a4e", "cluster": "one-pod", "storage": "pvc", "allowed": true} 2025-12-04T12:12:45.842Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "99c0d184-72a0-4843-b80c-2dc60ca3ed20", "cluster": "one-pod", "storage": "pvc", "allowed": true} 2025-12-04T12:12:45.953Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "42221355-3c9a-46ca-bf54-c1ba6945b985", "cluster": "one-pod", "storage": "pvc", "allowed": true} 2025-12-04T12:12:50.946Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "ddc7c920-3062-4bf3-aa3d-5e64a165e42e", "cluster": "one-pod", "storage": "pvc", "allowed": true} 2025-12-04T12:12:50.986Z INFO Backup succeeded {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "ddc7c920-3062-4bf3-aa3d-5e64a165e42e", "job": "xb-on-demand-backup-pvc"} 2025-12-04T12:12:50.986Z DEBUG Removing mysql-init secret {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "ddc7c920-3062-4bf3-aa3d-5e64a165e42e", "job": "xb-on-demand-backup-pvc", "secret": "one-pod-mysql-init"} 2025-12-04T12:13:24.313Z INFO stopping cluster {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "0afcd399-43cb-4f1c-8ac6-6a75dda63e99", "cluster": "one-pod"} 2025-12-04T12:13:24.409Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"one-pod","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "one-pod", "reconcileID": "8ba20644-4312-45c1-b4a5-6092780b6c2c", "object": "one-pod-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", 
"hashChanged": true, "metaChanged": true} &v1.StatefulSet{ TypeMeta: {Kind: "StatefulSet", APIVersion: "apps/v1"}, ObjectMeta: v1.ObjectMeta{ ... // 2 identical fields Namespace: "one-pod-31237", SelfLink: "", - UID: "28cc85de-e6f0-49c0-85af-551a7a5b04a1", + UID: "", - ResourceVersion: "1764850326325743012", + ResourceVersion: "", - Generation: 2, + Generation: 0, - CreationTimestamp: v1.Time{Time: s"2025-12-04 12:09:19 +0000 UTC"}, + CreationTimestamp: v1.Time{}, DeletionTimestamp: nil, DeletionGracePeriodSeconds: nil, Labels: nil, - Annotations: map[string]string{ - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6MSwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJvbmUtcG9kIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJu"..., - }, + Annotations: map[string]string{ + "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6MCwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJvbmUtcG9kIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJu"..., + }, OwnerReferences: {{APIVersion: "pxc.percona.com/v1", Kind: "PerconaXtraDBCluster", Name: "one-pod", UID: "9f03cda0-47c2-488b-9348-57cb4002c7cc", ...}}, Finalizers: nil, - ManagedFields: []v1.ManagedFieldsEntry{ - { - Manager: "percona-xtradb-cluster-operator", - Operation: "Update", - APIVersion: "apps/v1", - Time: s"2025-12-04 12:11:03 +0000 UTC", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:metadata":{"f:annotations":{".":{},"f:percona.com/last-confi`..., - }, - { - Manager: "kube-controller-manager", - Operation: "Update", - APIVersion: "apps/v1", - Time: s"2025-12-04 12:12:06 +0000 UTC", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:status":{"f:availableReplicas":{},"f:collisionCount":{},"f:c`..., - Subresource: "status", - }, - }, + ManagedFields: nil, }, Spec: v1.StatefulSetSpec{ - Replicas: &1, + Replicas: &0, Selector: &{MatchLabels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "one-pod", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, Template: v1.PodTemplateSpec{ ObjectMeta: {Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "one-pod", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: {"kubectl.kubernetes.io/default-container": "pxc", "percona.com/configuration-hash": "72cb4c0d11a656dd101817b171d019ca", "percona.com/ssl-hash": "a739953a0bd2d89f989118b60d67fb50", "percona.com/ssl-internal-hash": "3c8a7fd9ebee90e24c57504d70c9e9ce"}}, Spec: v1.PodSpec{ Volumes: []v1.Volume{ {Name: "tmp", VolumeSource: {EmptyDir: &{}}}, { Name: "config", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-pxc", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &false, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "ssl-internal", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-ssl-internal", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "ssl", VolumeSource: v1.VolumeSource{ ... 
// 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-ssl", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &false, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "auto-config", VolumeSource: v1.VolumeSource{ ... // 16 identical fields FC: nil, AzureFile: nil, ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: {Name: "auto-one-pod-pxc"}, Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, VsphereVolume: nil, Quobyte: nil, ... // 9 identical fields }, }, { Name: "vault-keyring-secret", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-vault", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "mysql-users-secret-file", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "internal-one-pod", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &false, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "mysql-init-file", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-mysql-init", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, }, InitContainers: []v1.Container{ { ... // 16 identical fields StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, }, Containers: []v1.Container{ { ... // 3 identical fields Args: {"mysqld"}, WorkingDir: "", Ports: []v1.ContainerPort{ { Name: "mysql", HostPort: 0, ContainerPort: 3306, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "sst", HostPort: 0, ContainerPort: 4444, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "write-set", HostPort: 0, ContainerPort: 4567, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "ist", HostPort: 0, ContainerPort: 4568, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysql-admin", HostPort: 0, ContainerPort: 33062, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysqlx", HostPort: 0, ContainerPort: 33060, - Protocol: "TCP", + Protocol: "", HostIP: "", }, }, EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "one-pod-env-vars-pxc"}, Optional: &true}}}, Env: {{Name: "PXC_SERVICE", Value: "one-pod-pxc-unready"}, {Name: "MONITOR_HOST", Value: "%"}, {Name: "MYSQL_ROOT_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-one-pod"}, Key: "root"}}}, {Name: "XTRABACKUP_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-one-pod"}, Key: "xtrabackup"}}}, ...}, ... 
// 4 identical fields VolumeMounts: {{Name: "datadir", MountPath: "/var/lib/mysql"}, {Name: "config", MountPath: "/etc/percona-xtradb-cluster.conf.d"}, {Name: "tmp", MountPath: "/tmp"}, {Name: "ssl", MountPath: "/etc/mysql/ssl"}, ...}, VolumeDevices: nil, LivenessProbe: &v1.Probe{ ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/liveness-check.sh"}}}, InitialDelaySeconds: 300, TimeoutSeconds: 5, - PeriodSeconds: 10, + PeriodSeconds: 0, SuccessThreshold: 1, FailureThreshold: 3, TerminationGracePeriodSeconds: nil, }, ReadinessProbe: &{ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/readiness-check.sh"}}}, InitialDelaySeconds: 15, TimeoutSeconds: 15, PeriodSeconds: 30, ...}, StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, }, EphemeralContainers: nil, - RestartPolicy: "Always", + RestartPolicy: "", TerminationGracePeriodSeconds: &600, ActiveDeadlineSeconds: nil, - DNSPolicy: "ClusterFirst", + DNSPolicy: "", NodeSelector: nil, ServiceAccountName: "default", - DeprecatedServiceAccount: "default", + DeprecatedServiceAccount: "", AutomountServiceAccountToken: nil, NodeName: "", ... // 7 identical fields Subdomain: "", Affinity: &{PodAntiAffinity: &{RequiredDuringSchedulingIgnoredDuringExecution: {{LabelSelector: &{MatchLabels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "one-pod", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, TopologyKey: "kubernetes.io/hostname"}}}}, - SchedulerName: "default-scheduler", + SchedulerName: "", Tolerations: nil, HostAliases: nil, ... // 6 identical fields PreemptionPolicy: nil, Overhead: nil, - TopologySpreadConstraints: nil, + TopologySpreadConstraints: []v1.TopologySpreadConstraint{}, SetHostnameAsFQDN: nil, OS: nil, ... // 5 identical fields }, }, VolumeClaimTemplates: []v1.PersistentVolumeClaim{ { TypeMeta: {}, ObjectMeta: {Name: "datadir", Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "one-pod", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, Spec: v1.PersistentVolumeClaimSpec{ ... // 3 identical fields VolumeName: "", StorageClassName: nil, - VolumeMode: &"Filesystem", + VolumeMode: nil, DataSource: nil, DataSourceRef: nil, VolumeAttributesClassName: nil, }, Status: v1.PersistentVolumeClaimStatus{ - Phase: "Pending", + Phase: "", AccessModes: nil, Capacity: nil, ... 
// 5 identical fields }, }, }, ServiceName: "one-pod-pxc", - PodManagementPolicy: "OrderedReady", + PodManagementPolicy: "", UpdateStrategy: {Type: "RollingUpdate", RollingUpdate: &{Partition: &0}}, - RevisionHistoryLimit: &10, + RevisionHistoryLimit: nil, MinReadySeconds: 0, - PersistentVolumeClaimRetentionPolicy: s"&StatefulSetPersistentVolumeClaimRetentionPolicy{WhenDeleted:Retain,WhenScaled:Retain,}", + PersistentVolumeClaimRetentionPolicy: nil, Ordinals: nil, }, Status: v1.StatefulSetStatus{ - ObservedGeneration: 2, + ObservedGeneration: 0, - Replicas: 1, + Replicas: 0, - ReadyReplicas: 1, + ReadyReplicas: 0, - CurrentReplicas: 1, + CurrentReplicas: 0, - UpdatedReplicas: 1, + UpdatedReplicas: 0, - CurrentRevision: "one-pod-pxc-849f8c9bd4", + CurrentRevision: "", - UpdateRevision: "one-pod-pxc-849f8c9bd4", + UpdateRevision: "", - CollisionCount: &0, + CollisionCount: nil, Conditions: nil, - AvailableReplicas: 1, + AvailableReplicas: 0, }, } 2025-12-04T12:13:24.458Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"one-pod","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "one-pod", "reconcileID": "8ba20644-4312-45c1-b4a5-6092780b6c2c", "object": "one-pod-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} &v1.StatefulSet{ TypeMeta: {Kind: "StatefulSet", APIVersion: "apps/v1"}, ObjectMeta: v1.ObjectMeta{ ... // 2 identical fields Namespace: "one-pod-31237", SelfLink: "", - UID: "28cc85de-e6f0-49c0-85af-551a7a5b04a1", + UID: "", - ResourceVersion: "1764850326325743012", + ResourceVersion: "", - Generation: 2, + Generation: 0, - CreationTimestamp: v1.Time{Time: s"2025-12-04 12:09:19 +0000 UTC"}, + CreationTimestamp: v1.Time{}, DeletionTimestamp: nil, DeletionGracePeriodSeconds: nil, Labels: nil, - Annotations: map[string]string{ - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6MSwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJvbmUtcG9kIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJu"..., - }, + Annotations: map[string]string{ + "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6MCwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJvbmUtcG9kIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJu"..., + }, OwnerReferences: {{APIVersion: "pxc.percona.com/v1", Kind: "PerconaXtraDBCluster", Name: "one-pod", UID: "9f03cda0-47c2-488b-9348-57cb4002c7cc", ...}}, Finalizers: nil, - ManagedFields: []v1.ManagedFieldsEntry{ - { - Manager: "percona-xtradb-cluster-operator", - Operation: "Update", - APIVersion: "apps/v1", - Time: s"2025-12-04 12:11:03 +0000 UTC", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:metadata":{"f:annotations":{".":{},"f:percona.com/last-confi`..., - }, - { - Manager: "kube-controller-manager", - Operation: "Update", - APIVersion: "apps/v1", - Time: s"2025-12-04 12:12:06 +0000 UTC", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:status":{"f:availableReplicas":{},"f:collisionCount":{},"f:c`..., - Subresource: "status", - }, - }, + ManagedFields: nil, }, Spec: v1.StatefulSetSpec{ - Replicas: &1, + Replicas: &0, Selector: &{MatchLabels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "one-pod", "app.kubernetes.io/managed-by": 
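The StatefulSet dumps in this log are go-cmp-style diffs from the operator's DEBUG "Updating object" records: lines prefixed with - show the object as currently stored by the API server, and lines prefixed with + show the spec the operator regenerated from the PerconaXtraDBCluster resource. In the 12:13:24 diff the only substantive spec change is Replicas: &1 -> &0 (the restore flow scaling the one-pod cluster down), together with the matching last-config-hash annotation; everything else (DefaultMode: &420, Protocol: "TCP", RestartPolicy: "Always", DNSPolicy: "ClusterFirst", SchedulerName: "default-scheduler", PodManagementPolicy: "OrderedReady", and the zeroed status and metadata fields) is server-side defaulting or server-managed state that the freshly built spec leaves empty. DefaultMode 420 is decimal for octal 0644, the default file mode for Secret and ConfigMap volumes. A minimal sketch that reproduces one such line, assuming the github.com/google/go-cmp and k8s.io/api modules (an illustration, not the operator's actual code):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	corev1 "k8s.io/api/core/v1"
)

func ptr[T any](v T) *T { return &v }

func main() {
	// As stored by the API server: DefaultMode was defaulted to 420,
	// i.e. 0644 in octal (rw-r--r--), the Secret-volume default.
	live := corev1.SecretVolumeSource{
		SecretName:  "one-pod-ssl",
		DefaultMode: ptr[int32](420),
		Optional:    ptr(false),
	}
	// As regenerated from the custom resource: DefaultMode left unset,
	// so the diff reports `- DefaultMode: &420,` / `+ DefaultMode: nil,`.
	desired := corev1.SecretVolumeSource{
		SecretName: "one-pod-ssl",
		Optional:   ptr(false),
	}
	fmt.Println(cmp.Diff(live, desired))
}

Because the API server re-applies these defaults on every read, the same - / + pairs recur in every diff in this log; they are comparison noise around the real change.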
"percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, Template: v1.PodTemplateSpec{ ObjectMeta: {Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "one-pod", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: {"kubectl.kubernetes.io/default-container": "pxc", "percona.com/configuration-hash": "72cb4c0d11a656dd101817b171d019ca", "percona.com/ssl-hash": "a739953a0bd2d89f989118b60d67fb50", "percona.com/ssl-internal-hash": "3c8a7fd9ebee90e24c57504d70c9e9ce"}}, Spec: v1.PodSpec{ Volumes: []v1.Volume{ {Name: "tmp", VolumeSource: {EmptyDir: &{}}}, { Name: "config", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-pxc", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &false, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "ssl-internal", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-ssl-internal", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "ssl", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-ssl", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &false, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "auto-config", VolumeSource: v1.VolumeSource{ ... // 16 identical fields FC: nil, AzureFile: nil, ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: {Name: "auto-one-pod-pxc"}, Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, VsphereVolume: nil, Quobyte: nil, ... // 9 identical fields }, }, { Name: "vault-keyring-secret", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-vault", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "mysql-users-secret-file", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "internal-one-pod", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &false, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "mysql-init-file", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-mysql-init", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, }, InitContainers: []v1.Container{ { ... // 16 identical fields StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, }, Containers: []v1.Container{ { ... 
// 3 identical fields Args: {"mysqld"}, WorkingDir: "", Ports: []v1.ContainerPort{ { Name: "mysql", HostPort: 0, ContainerPort: 3306, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "sst", HostPort: 0, ContainerPort: 4444, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "write-set", HostPort: 0, ContainerPort: 4567, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "ist", HostPort: 0, ContainerPort: 4568, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysql-admin", HostPort: 0, ContainerPort: 33062, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysqlx", HostPort: 0, ContainerPort: 33060, - Protocol: "TCP", + Protocol: "", HostIP: "", }, }, EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "one-pod-env-vars-pxc"}, Optional: &true}}}, Env: {{Name: "PXC_SERVICE", Value: "one-pod-pxc-unready"}, {Name: "MONITOR_HOST", Value: "%"}, {Name: "MYSQL_ROOT_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-one-pod"}, Key: "root"}}}, {Name: "XTRABACKUP_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-one-pod"}, Key: "xtrabackup"}}}, ...}, ... // 4 identical fields VolumeMounts: {{Name: "datadir", MountPath: "/var/lib/mysql"}, {Name: "config", MountPath: "/etc/percona-xtradb-cluster.conf.d"}, {Name: "tmp", MountPath: "/tmp"}, {Name: "ssl", MountPath: "/etc/mysql/ssl"}, ...}, VolumeDevices: nil, LivenessProbe: &v1.Probe{ ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/liveness-check.sh"}}}, InitialDelaySeconds: 300, TimeoutSeconds: 5, - PeriodSeconds: 10, + PeriodSeconds: 0, SuccessThreshold: 1, FailureThreshold: 3, TerminationGracePeriodSeconds: nil, }, ReadinessProbe: &{ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/readiness-check.sh"}}}, InitialDelaySeconds: 15, TimeoutSeconds: 15, PeriodSeconds: 30, ...}, StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, }, EphemeralContainers: nil, - RestartPolicy: "Always", + RestartPolicy: "", TerminationGracePeriodSeconds: &600, ActiveDeadlineSeconds: nil, - DNSPolicy: "ClusterFirst", + DNSPolicy: "", NodeSelector: nil, ServiceAccountName: "default", - DeprecatedServiceAccount: "default", + DeprecatedServiceAccount: "", AutomountServiceAccountToken: nil, NodeName: "", ... // 7 identical fields Subdomain: "", Affinity: &{PodAntiAffinity: &{RequiredDuringSchedulingIgnoredDuringExecution: {{LabelSelector: &{MatchLabels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "one-pod", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, TopologyKey: "kubernetes.io/hostname"}}}}, - SchedulerName: "default-scheduler", + SchedulerName: "", Tolerations: nil, HostAliases: nil, ... // 6 identical fields PreemptionPolicy: nil, Overhead: nil, - TopologySpreadConstraints: nil, + TopologySpreadConstraints: []v1.TopologySpreadConstraint{}, SetHostnameAsFQDN: nil, OS: nil, ... // 5 identical fields }, }, VolumeClaimTemplates: []v1.PersistentVolumeClaim{ { TypeMeta: {}, ObjectMeta: {Name: "datadir", Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "one-pod", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, Spec: v1.PersistentVolumeClaimSpec{ ... 
// 3 identical fields VolumeName: "", StorageClassName: nil, - VolumeMode: &"Filesystem", + VolumeMode: nil, DataSource: nil, DataSourceRef: nil, VolumeAttributesClassName: nil, }, Status: v1.PersistentVolumeClaimStatus{ - Phase: "Pending", + Phase: "", AccessModes: nil, Capacity: nil, ... // 5 identical fields }, }, }, ServiceName: "one-pod-pxc", - PodManagementPolicy: "OrderedReady", + PodManagementPolicy: "", UpdateStrategy: {Type: "RollingUpdate", RollingUpdate: &{Partition: &0}}, - RevisionHistoryLimit: &10, + RevisionHistoryLimit: nil, MinReadySeconds: 0, - PersistentVolumeClaimRetentionPolicy: s"&StatefulSetPersistentVolumeClaimRetentionPolicy{WhenDeleted:Retain,WhenScaled:Retain,}", + PersistentVolumeClaimRetentionPolicy: nil, Ordinals: nil, }, Status: v1.StatefulSetStatus{ - ObservedGeneration: 2, + ObservedGeneration: 0, - Replicas: 1, + Replicas: 0, - ReadyReplicas: 1, + ReadyReplicas: 0, - CurrentReplicas: 1, + CurrentReplicas: 0, - UpdatedReplicas: 1, + UpdatedReplicas: 0, - CurrentRevision: "one-pod-pxc-849f8c9bd4", + CurrentRevision: "", - UpdateRevision: "one-pod-pxc-849f8c9bd4", + UpdateRevision: "", - CollisionCount: &0, + CollisionCount: nil, Conditions: nil, - AvailableReplicas: 1, + AvailableReplicas: 0, }, } 2025-12-04T12:13:38.400Z INFO starting restore {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "1ee7f7a2-3733-475d-9b05-5f355fcdbbf2", "cluster": "one-pod", "backup": "on-demand-backup-pvc"} 2025-12-04T12:13:38.656Z INFO starting restore {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "f02263d6-1b47-41c1-9b56-57712ab8c1ba", "cluster": "one-pod", "backup": "on-demand-backup-pvc"} 2025-12-04T12:13:43.614Z INFO starting restore {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "2c6af94a-434c-491b-a347-04362ce968ec", "cluster": "one-pod", "backup": "on-demand-backup-pvc"} 2025-12-04T12:13:48.667Z INFO starting restore {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "b271a6ad-8dc3-45b4-b4ae-eb0fab7fac6c", "cluster": "one-pod", "backup": "on-demand-backup-pvc"} 2025-12-04T12:13:53.718Z INFO starting restore {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "1ea4aa26-6f0f-4d71-90bd-47ea9ca09a21", "cluster": "one-pod", "backup": "on-demand-backup-pvc"} 2025-12-04T12:13:53.793Z INFO Waiting for restore job to finish {"controller": "pxcrestore-controller", 
"controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "eac0ef1f-5ed0-4bfb-97f1-d38c9d6b98d6", "job": "restore-job-on-demand-backup-pvc-one-pod"} 2025-12-04T12:13:58.786Z INFO Waiting for restore job to finish {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "f909cf5e-53e7-4a12-ae97-79dce91b0a85", "job": "restore-job-on-demand-backup-pvc-one-pod"} 2025-12-04T12:14:03.804Z INFO Waiting for restore job to finish {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "04d8e8d2-e75d-4703-81de-a873babec4c6", "job": "restore-job-on-demand-backup-pvc-one-pod"} 2025-12-04T12:14:08.925Z INFO Waiting for restore job to finish {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "513896d1-22a2-42f3-9963-1c84af312662", "job": "restore-job-on-demand-backup-pvc-one-pod"} 2025-12-04T12:14:13.943Z INFO preparing cluster {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "75604022-c309-4448-bae2-fe01bc3a300d", "cluster": "one-pod"} 2025-12-04T12:14:14.021Z INFO Waiting for prepare job to finish {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "f5d275a3-24e8-4652-a708-ff6183741710", "job": "prepare-job-on-demand-backup-pvc-one-pod"} 2025-12-04T12:14:18.967Z INFO Waiting for prepare job to finish {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "c1988772-6f4c-4244-bd26-a1c3360efe89", "job": "prepare-job-on-demand-backup-pvc-one-pod"} 2025-12-04T12:14:23.985Z INFO Waiting for prepare job to finish {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "a1687c95-b07d-44e5-b70a-873f4468cd07", "job": "prepare-job-on-demand-backup-pvc-one-pod"} 2025-12-04T12:14:29.073Z INFO Waiting for prepare job to finish {"controller": 
"pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "335959a7-1d15-43d4-bef3-f1b1bf8623a1", "job": "prepare-job-on-demand-backup-pvc-one-pod"} 2025-12-04T12:14:34.096Z INFO Waiting for prepare job to finish {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "d7788223-1c8e-42aa-95c3-55d481ee9433", "job": "prepare-job-on-demand-backup-pvc-one-pod"} 2025-12-04T12:14:39.113Z INFO Waiting for prepare job to finish {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "cd5fa07b-47cc-4d6b-8a19-7b37cd31acd2", "job": "prepare-job-on-demand-backup-pvc-one-pod"} 2025-12-04T12:14:44.130Z INFO Waiting for prepare job to finish {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "dbbc37ae-c9d9-43c3-8d48-543d3a399f61", "job": "prepare-job-on-demand-backup-pvc-one-pod"} 2025-12-04T12:14:49.150Z INFO Waiting for prepare job to finish {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "919675ed-5f77-4195-8f5a-303a034e4003", "job": "prepare-job-on-demand-backup-pvc-one-pod"} 2025-12-04T12:14:54.166Z INFO Waiting for prepare job to finish {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "998e6e16-7e8e-4fac-9d05-cf6a931ebd1d", "job": "prepare-job-on-demand-backup-pvc-one-pod"} 2025-12-04T12:14:59.190Z INFO Waiting for prepare job to finish {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "60d4ca96-355f-4e2e-9736-93f3094989c5", "job": "prepare-job-on-demand-backup-pvc-one-pod"} 2025-12-04T12:15:04.208Z INFO Waiting for prepare job to finish {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "e10d2543-a6e3-4f8d-afea-839caa452771", "job": "prepare-job-on-demand-backup-pvc-one-pod"} 2025-12-04T12:15:09.223Z 
INFO Waiting for prepare job to finish {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "7b99d547-436f-4850-b82d-56daba5e55f9", "job": "prepare-job-on-demand-backup-pvc-one-pod"} 2025-12-04T12:15:14.239Z INFO Waiting for prepare job to finish {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "0c838b1b-c6a9-48f3-b230-56a57a2e3f18", "job": "prepare-job-on-demand-backup-pvc-one-pod"} 2025-12-04T12:15:19.256Z INFO Waiting for prepare job to finish {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "a7a53e87-f5b4-44d6-be58-94449ed7a518", "job": "prepare-job-on-demand-backup-pvc-one-pod"} 2025-12-04T12:15:24.274Z INFO starting cluster {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "22c21273-8991-4a5a-85c9-ae709c1b188d", "cluster": "one-pod"} 2025-12-04T12:15:24.493Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"one-pod","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "one-pod", "reconcileID": "88269b74-2704-4f5f-9491-60fe230c414e", "object": "one-pod-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} &v1.StatefulSet{ TypeMeta: {Kind: "StatefulSet", APIVersion: "apps/v1"}, ObjectMeta: v1.ObjectMeta{ ... 
// 2 identical fields Namespace: "one-pod-31237", SelfLink: "", - UID: "28cc85de-e6f0-49c0-85af-551a7a5b04a1", + UID: "", - ResourceVersion: "1764850417612671012", + ResourceVersion: "", - Generation: 3, + Generation: 0, - CreationTimestamp: v1.Time{Time: s"2025-12-04 12:09:19 +0000 UTC"}, + CreationTimestamp: v1.Time{}, DeletionTimestamp: nil, DeletionGracePeriodSeconds: nil, Labels: nil, - Annotations: map[string]string{ - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6MCwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJvbmUtcG9kIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJu"..., - }, + Annotations: map[string]string{ + "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6MSwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJvbmUtcG9kIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJu"..., + }, OwnerReferences: {{APIVersion: "pxc.percona.com/v1", Kind: "PerconaXtraDBCluster", Name: "one-pod", UID: "9f03cda0-47c2-488b-9348-57cb4002c7cc", ...}}, Finalizers: nil, - ManagedFields: []v1.ManagedFieldsEntry{ - { - Manager: "percona-xtradb-cluster-operator", - Operation: "Update", - APIVersion: "apps/v1", - Time: s"2025-12-04 12:13:24 +0000 UTC", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:metadata":{"f:annotations":{".":{},"f:percona.com/last-confi`..., - }, - { - Manager: "kube-controller-manager", - Operation: "Update", - APIVersion: "apps/v1", - Time: s"2025-12-04 12:13:37 +0000 UTC", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:status":{"f:availableReplicas":{},"f:collisionCount":{},"f:c`..., - Subresource: "status", - }, - }, + ManagedFields: nil, }, Spec: v1.StatefulSetSpec{ - Replicas: &0, + Replicas: &1, Selector: &{MatchLabels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "one-pod", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, Template: v1.PodTemplateSpec{ ObjectMeta: {Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "one-pod", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: {"kubectl.kubernetes.io/default-container": "pxc", "percona.com/configuration-hash": "72cb4c0d11a656dd101817b171d019ca", "percona.com/ssl-hash": "a739953a0bd2d89f989118b60d67fb50", "percona.com/ssl-internal-hash": "3c8a7fd9ebee90e24c57504d70c9e9ce"}}, Spec: v1.PodSpec{ Volumes: []v1.Volume{ {Name: "tmp", VolumeSource: {EmptyDir: &{}}}, { Name: "config", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-pxc", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &false, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "ssl-internal", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-ssl-internal", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "ssl", VolumeSource: v1.VolumeSource{ ... 
// 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-ssl", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &false, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "auto-config", VolumeSource: v1.VolumeSource{ ... // 16 identical fields FC: nil, AzureFile: nil, ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: {Name: "auto-one-pod-pxc"}, Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, VsphereVolume: nil, Quobyte: nil, ... // 9 identical fields }, }, { Name: "vault-keyring-secret", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-vault", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "mysql-users-secret-file", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "internal-one-pod", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &false, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "mysql-init-file", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-mysql-init", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, }, InitContainers: []v1.Container{ { ... // 16 identical fields StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, }, Containers: []v1.Container{ { ... // 3 identical fields Args: {"mysqld"}, WorkingDir: "", Ports: []v1.ContainerPort{ { Name: "mysql", HostPort: 0, ContainerPort: 3306, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "sst", HostPort: 0, ContainerPort: 4444, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "write-set", HostPort: 0, ContainerPort: 4567, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "ist", HostPort: 0, ContainerPort: 4568, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysql-admin", HostPort: 0, ContainerPort: 33062, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysqlx", HostPort: 0, ContainerPort: 33060, - Protocol: "TCP", + Protocol: "", HostIP: "", }, }, EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "one-pod-env-vars-pxc"}, Optional: &true}}}, Env: {{Name: "PXC_SERVICE", Value: "one-pod-pxc-unready"}, {Name: "MONITOR_HOST", Value: "%"}, {Name: "MYSQL_ROOT_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-one-pod"}, Key: "root"}}}, {Name: "XTRABACKUP_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-one-pod"}, Key: "xtrabackup"}}}, ...}, ... 
// 4 identical fields VolumeMounts: {{Name: "datadir", MountPath: "/var/lib/mysql"}, {Name: "config", MountPath: "/etc/percona-xtradb-cluster.conf.d"}, {Name: "tmp", MountPath: "/tmp"}, {Name: "ssl", MountPath: "/etc/mysql/ssl"}, ...}, VolumeDevices: nil, LivenessProbe: &v1.Probe{ ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/liveness-check.sh"}}}, InitialDelaySeconds: 300, TimeoutSeconds: 5, - PeriodSeconds: 10, + PeriodSeconds: 0, SuccessThreshold: 1, FailureThreshold: 3, TerminationGracePeriodSeconds: nil, }, ReadinessProbe: &{ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/readiness-check.sh"}}}, InitialDelaySeconds: 15, TimeoutSeconds: 15, PeriodSeconds: 30, ...}, StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, }, EphemeralContainers: nil, - RestartPolicy: "Always", + RestartPolicy: "", TerminationGracePeriodSeconds: &600, ActiveDeadlineSeconds: nil, - DNSPolicy: "ClusterFirst", + DNSPolicy: "", NodeSelector: nil, ServiceAccountName: "default", - DeprecatedServiceAccount: "default", + DeprecatedServiceAccount: "", AutomountServiceAccountToken: nil, NodeName: "", ... // 7 identical fields Subdomain: "", Affinity: &{PodAntiAffinity: &{RequiredDuringSchedulingIgnoredDuringExecution: {{LabelSelector: &{MatchLabels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "one-pod", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, TopologyKey: "kubernetes.io/hostname"}}}}, - SchedulerName: "default-scheduler", + SchedulerName: "", Tolerations: nil, HostAliases: nil, ... // 6 identical fields PreemptionPolicy: nil, Overhead: nil, - TopologySpreadConstraints: nil, + TopologySpreadConstraints: []v1.TopologySpreadConstraint{}, SetHostnameAsFQDN: nil, OS: nil, ... // 5 identical fields }, }, VolumeClaimTemplates: []v1.PersistentVolumeClaim{ { TypeMeta: {}, ObjectMeta: {Name: "datadir", Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "one-pod", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, Spec: v1.PersistentVolumeClaimSpec{ ... // 3 identical fields VolumeName: "", StorageClassName: nil, - VolumeMode: &"Filesystem", + VolumeMode: nil, DataSource: nil, DataSourceRef: nil, VolumeAttributesClassName: nil, }, Status: v1.PersistentVolumeClaimStatus{ - Phase: "Pending", + Phase: "", AccessModes: nil, Capacity: nil, ... 
// 5 identical fields }, }, }, ServiceName: "one-pod-pxc", - PodManagementPolicy: "OrderedReady", + PodManagementPolicy: "", UpdateStrategy: {Type: "RollingUpdate", RollingUpdate: &{Partition: &0}}, - RevisionHistoryLimit: &10, + RevisionHistoryLimit: nil, MinReadySeconds: 0, - PersistentVolumeClaimRetentionPolicy: s"&StatefulSetPersistentVolumeClaimRetentionPolicy{WhenDeleted:Retain,WhenScaled:Retain,}", + PersistentVolumeClaimRetentionPolicy: nil, Ordinals: nil, }, Status: v1.StatefulSetStatus{ - ObservedGeneration: 3, + ObservedGeneration: 0, Replicas: 0, ReadyReplicas: 0, CurrentReplicas: 0, UpdatedReplicas: 0, - CurrentRevision: "one-pod-pxc-849f8c9bd4", + CurrentRevision: "", - UpdateRevision: "one-pod-pxc-849f8c9bd4", + UpdateRevision: "", - CollisionCount: &0, + CollisionCount: nil, Conditions: nil, AvailableReplicas: 0, }, } 2025-12-04T12:15:24.585Z DEBUG Updating object {"controller": "pxc-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBCluster", "PerconaXtraDBCluster": {"name":"one-pod","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "one-pod", "reconcileID": "88269b74-2704-4f5f-9491-60fe230c414e", "object": "one-pod-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}", "hashChanged": true, "metaChanged": true} &v1.StatefulSet{ TypeMeta: {Kind: "StatefulSet", APIVersion: "apps/v1"}, ObjectMeta: v1.ObjectMeta{ ... // 2 identical fields Namespace: "one-pod-31237", SelfLink: "", - UID: "28cc85de-e6f0-49c0-85af-551a7a5b04a1", + UID: "", - ResourceVersion: "1764850417612671012", + ResourceVersion: "", - Generation: 3, + Generation: 0, - CreationTimestamp: v1.Time{Time: s"2025-12-04 12:09:19 +0000 UTC"}, + CreationTimestamp: v1.Time{}, DeletionTimestamp: nil, DeletionGracePeriodSeconds: nil, Labels: nil, - Annotations: map[string]string{ - "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6MCwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJvbmUtcG9kIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJu"..., - }, + Annotations: map[string]string{ + "percona.com/last-config-hash": "eyJyZXBsaWNhcyI6MSwic2VsZWN0b3IiOnsibWF0Y2hMYWJlbHMiOnsiYXBwLmt1YmVybmV0ZXMuaW8vY29tcG9uZW50IjoicHhjIiwiYXBwLmt1YmVybmV0ZXMuaW8vaW5zdGFuY2UiOiJvbmUtcG9kIiwiYXBwLmt1YmVybmV0ZXMuaW8vbWFuYWdlZC1ieSI6InBlcmNvbmEteHRyYWRiLWNsdXN0ZXItb3BlcmF0b3IiLCJhcHAua3ViZXJu"..., + }, OwnerReferences: {{APIVersion: "pxc.percona.com/v1", Kind: "PerconaXtraDBCluster", Name: "one-pod", UID: "9f03cda0-47c2-488b-9348-57cb4002c7cc", ...}}, Finalizers: nil, - ManagedFields: []v1.ManagedFieldsEntry{ - { - Manager: "percona-xtradb-cluster-operator", - Operation: "Update", - APIVersion: "apps/v1", - Time: s"2025-12-04 12:13:24 +0000 UTC", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:metadata":{"f:annotations":{".":{},"f:percona.com/last-confi`..., - }, - { - Manager: "kube-controller-manager", - Operation: "Update", - APIVersion: "apps/v1", - Time: s"2025-12-04 12:13:37 +0000 UTC", - FieldsType: "FieldsV1", - FieldsV1: s`{"f:status":{"f:availableReplicas":{},"f:collisionCount":{},"f:c`..., - Subresource: "status", - }, - }, + ManagedFields: nil, }, Spec: v1.StatefulSetSpec{ - Replicas: &0, + Replicas: &1, Selector: &{MatchLabels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "one-pod", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, Template: 
v1.PodTemplateSpec{ ObjectMeta: {Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "one-pod", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}, Annotations: {"kubectl.kubernetes.io/default-container": "pxc", "percona.com/configuration-hash": "72cb4c0d11a656dd101817b171d019ca", "percona.com/ssl-hash": "a739953a0bd2d89f989118b60d67fb50", "percona.com/ssl-internal-hash": "3c8a7fd9ebee90e24c57504d70c9e9ce"}}, Spec: v1.PodSpec{ Volumes: []v1.Volume{ {Name: "tmp", VolumeSource: {EmptyDir: &{}}}, { Name: "config", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-pxc", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &false, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "ssl-internal", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-ssl-internal", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "ssl", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-ssl", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &false, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "auto-config", VolumeSource: v1.VolumeSource{ ... // 16 identical fields FC: nil, AzureFile: nil, ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: {Name: "auto-one-pod-pxc"}, Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, VsphereVolume: nil, Quobyte: nil, ... // 9 identical fields }, }, { Name: "vault-keyring-secret", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-vault", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "mysql-users-secret-file", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "internal-one-pod", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &false, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, { Name: "mysql-init-file", VolumeSource: v1.VolumeSource{ ... // 3 identical fields AWSElasticBlockStore: nil, GitRepo: nil, Secret: &v1.SecretVolumeSource{ SecretName: "one-pod-mysql-init", Items: nil, - DefaultMode: &420, + DefaultMode: nil, Optional: &true, }, NFS: nil, ISCSI: nil, ... // 22 identical fields }, }, }, InitContainers: []v1.Container{ { ... // 16 identical fields StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, }, Containers: []v1.Container{ { ... 
// 3 identical fields Args: {"mysqld"}, WorkingDir: "", Ports: []v1.ContainerPort{ { Name: "mysql", HostPort: 0, ContainerPort: 3306, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "sst", HostPort: 0, ContainerPort: 4444, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "write-set", HostPort: 0, ContainerPort: 4567, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "ist", HostPort: 0, ContainerPort: 4568, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysql-admin", HostPort: 0, ContainerPort: 33062, - Protocol: "TCP", + Protocol: "", HostIP: "", }, { Name: "mysqlx", HostPort: 0, ContainerPort: 33060, - Protocol: "TCP", + Protocol: "", HostIP: "", }, }, EnvFrom: {{SecretRef: &{LocalObjectReference: {Name: "one-pod-env-vars-pxc"}, Optional: &true}}}, Env: {{Name: "PXC_SERVICE", Value: "one-pod-pxc-unready"}, {Name: "MONITOR_HOST", Value: "%"}, {Name: "MYSQL_ROOT_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-one-pod"}, Key: "root"}}}, {Name: "XTRABACKUP_PASSWORD", ValueFrom: &{SecretKeyRef: &{LocalObjectReference: {Name: "internal-one-pod"}, Key: "xtrabackup"}}}, ...}, ... // 4 identical fields VolumeMounts: {{Name: "datadir", MountPath: "/var/lib/mysql"}, {Name: "config", MountPath: "/etc/percona-xtradb-cluster.conf.d"}, {Name: "tmp", MountPath: "/tmp"}, {Name: "ssl", MountPath: "/etc/mysql/ssl"}, ...}, VolumeDevices: nil, LivenessProbe: &v1.Probe{ ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/liveness-check.sh"}}}, InitialDelaySeconds: 300, TimeoutSeconds: 5, - PeriodSeconds: 10, + PeriodSeconds: 0, SuccessThreshold: 1, FailureThreshold: 3, TerminationGracePeriodSeconds: nil, }, ReadinessProbe: &{ProbeHandler: {Exec: &{Command: {"/var/lib/mysql/readiness-check.sh"}}}, InitialDelaySeconds: 15, TimeoutSeconds: 15, PeriodSeconds: 30, ...}, StartupProbe: nil, Lifecycle: nil, - TerminationMessagePath: "/dev/termination-log", + TerminationMessagePath: "", - TerminationMessagePolicy: "File", + TerminationMessagePolicy: "", ImagePullPolicy: "Always", SecurityContext: nil, ... // 3 identical fields }, }, EphemeralContainers: nil, - RestartPolicy: "Always", + RestartPolicy: "", TerminationGracePeriodSeconds: &600, ActiveDeadlineSeconds: nil, - DNSPolicy: "ClusterFirst", + DNSPolicy: "", NodeSelector: nil, ServiceAccountName: "default", - DeprecatedServiceAccount: "default", + DeprecatedServiceAccount: "", AutomountServiceAccountToken: nil, NodeName: "", ... // 7 identical fields Subdomain: "", Affinity: &{PodAntiAffinity: &{RequiredDuringSchedulingIgnoredDuringExecution: {{LabelSelector: &{MatchLabels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "one-pod", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, TopologyKey: "kubernetes.io/hostname"}}}}, - SchedulerName: "default-scheduler", + SchedulerName: "", Tolerations: nil, HostAliases: nil, ... // 6 identical fields PreemptionPolicy: nil, Overhead: nil, - TopologySpreadConstraints: nil, + TopologySpreadConstraints: []v1.TopologySpreadConstraint{}, SetHostnameAsFQDN: nil, OS: nil, ... // 5 identical fields }, }, VolumeClaimTemplates: []v1.PersistentVolumeClaim{ { TypeMeta: {}, ObjectMeta: {Name: "datadir", Labels: {"app.kubernetes.io/component": "pxc", "app.kubernetes.io/instance": "one-pod", "app.kubernetes.io/managed-by": "percona-xtradb-cluster-operator", "app.kubernetes.io/name": "percona-xtradb-cluster", ...}}, Spec: v1.PersistentVolumeClaimSpec{ ... 
// 3 identical fields VolumeName: "", StorageClassName: nil, - VolumeMode: &"Filesystem", + VolumeMode: nil, DataSource: nil, DataSourceRef: nil, VolumeAttributesClassName: nil, }, Status: v1.PersistentVolumeClaimStatus{ - Phase: "Pending", + Phase: "", AccessModes: nil, Capacity: nil, ... // 5 identical fields }, }, }, ServiceName: "one-pod-pxc", - PodManagementPolicy: "OrderedReady", + PodManagementPolicy: "", UpdateStrategy: {Type: "RollingUpdate", RollingUpdate: &{Partition: &0}}, - RevisionHistoryLimit: &10, + RevisionHistoryLimit: nil, MinReadySeconds: 0, - PersistentVolumeClaimRetentionPolicy: s"&StatefulSetPersistentVolumeClaimRetentionPolicy{WhenDeleted:Retain,WhenScaled:Retain,}", + PersistentVolumeClaimRetentionPolicy: nil, Ordinals: nil, }, Status: v1.StatefulSetStatus{ - ObservedGeneration: 3, + ObservedGeneration: 0, Replicas: 0, ReadyReplicas: 0, CurrentReplicas: 0, UpdatedReplicas: 0, - CurrentRevision: "one-pod-pxc-849f8c9bd4", + CurrentRevision: "", - UpdateRevision: "one-pod-pxc-849f8c9bd4", + UpdateRevision: "", - CollisionCount: &0, + CollisionCount: nil, Conditions: nil, AvailableReplicas: 0, }, } 2025-12-04T12:15:29.386Z INFO Waiting for cluster to start {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "edca7ecf-8295-4374-baf5-32386bc4cca6", "cluster": "one-pod"} 2025-12-04T12:15:34.402Z INFO Waiting for cluster to start {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "9ef9c527-5c37-4813-ad77-b62154f01272", "cluster": "one-pod"} 2025-12-04T12:15:39.419Z INFO Waiting for cluster to start {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "0ba2e89f-a09c-4674-aa0a-a521fce488c8", "cluster": "one-pod"} 2025-12-04T12:15:44.435Z INFO Waiting for cluster to start {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "eb447fdb-3e83-4ba7-8c25-150b5d6faade", "cluster": "one-pod"} 2025-12-04T12:15:49.459Z INFO Waiting for cluster to start {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "874f6c4b-38cd-4aec-87cc-a58f5247eb27", "cluster": "one-pod"} 2025-12-04T12:15:54.477Z INFO Waiting for cluster to start {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": 
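The paired "Updating object" dumps at 12:15:24 mirror the 12:13:24 one: as the restore controller logs "starting cluster", Replicas flips back from &0 to &1 and the percona.com/last-config-hash annotation changes in the opposite direction. That annotation is base64-encoded JSON, and decoding just the prefixes visible in these diffs confirms the replica toggle. A small sketch (RawStdEncoding, because the logged prefixes are truncated and unpadded):

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// The percona.com/last-config-hash values in the diffs above differ
	// only in their opening bytes; decoding those prefixes shows the
	// restore flow toggling the StatefulSet size.
	for _, p := range []string{"eyJyZXBsaWNhcyI6MSw", "eyJyZXBsaWNhcyI6MCw"} {
		b, err := base64.RawStdEncoding.DecodeString(p)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %s\n", p, b) // {"replicas":1, and {"replicas":0,
	}
}

The "Waiting for cluster to start" lines that follow are the restore controller polling, presumably until the single PXC pod reports ready again.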
"on-demand-backup-pvc", "reconcileID": "8d7afbde-91a0-4f80-9a5d-ad8a28274b34", "cluster": "one-pod"} 2025-12-04T12:15:59.494Z INFO Waiting for cluster to start {"controller": "pxcrestore-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterRestore", "PerconaXtraDBClusterRestore": {"name":"on-demand-backup-pvc","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-pvc", "reconcileID": "449dc306-1099-40cc-ab03-fc2e5da6e95d", "cluster": "one-pod"} 2025-12-04T12:16:59.840Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "674b4d3d-fbfe-45a7-a4af-08e195eec71f", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:16:59.869Z INFO Created a new backup job {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "674b4d3d-fbfe-45a7-a4af-08e195eec71f", "namespace": "one-pod-31237", "name": "xb-on-demand-backup-aws-s3"} 2025-12-04T12:17:04.870Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "29007e22-a131-4d27-aebf-b559209d4a34", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:17:04.946Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "49f8ddba-4c97-4b5e-8e0e-b93e9202070d", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:17:09.936Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "8a007b8d-26b9-4ab3-91de-ed9d680b59ca", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:17:14.993Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "080c5a53-f0ca-467d-8e22-81a703e3e1d5", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:17:20.033Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": 
{"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "337e6750-5779-40bc-a2a5-e0040e6400c4", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:17:25.082Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "2bc8e6d3-cabd-4cc7-b93a-c2621ecf8b5f", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:17:30.122Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "51d5f377-6614-4ba3-bb16-c7398221b530", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:17:35.186Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "2a07b386-94cf-406d-94f8-01878e455c6f", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:17:40.233Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "bb54f384-45e4-47c9-9585-a949e5da66c1", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:17:40.307Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "4cfdb4df-fc26-497a-a98c-22b904cb456d", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:17:45.293Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "029c3461-f12f-4cdb-838c-4d14c812f6fc", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:17:50.340Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "5fbf9acc-5881-4857-9083-04fbf60ce9be", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:17:50.432Z DEBUG 
Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "dd558631-0966-4fe9-89ab-b877c8999f42", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:17:55.433Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "985c596f-b4ae-4d06-87af-240389f452ab", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:18:00.466Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "0cfc14bb-05b3-4ca6-8e0d-35490a00c0bd", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:18:05.502Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "2e1ff219-347a-4576-b914-207c33c5e357", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:18:10.545Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "6545168d-9ee4-4b26-bd61-1befce20fc70", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:18:15.581Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "671e56d9-bfec-44d5-b032-ff41a75fc527", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:18:20.629Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "c02b3068-0f94-4ada-9b69-8e199f92101c", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:18:25.664Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": 
"one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "c3365268-d3bb-4d35-ac71-a6d4039cd20c", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:18:30.704Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "692b98c4-2658-4f84-b12c-acd62ede3d57", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:18:35.756Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "5e13b165-231f-42c6-940c-28c6713c1c9f", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:18:40.796Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "ac8d9ee7-15aa-415e-a3f5-36726571cc56", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:18:45.840Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "d80963c1-6b34-4d42-8a23-d29c52c2f86b", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:18:50.877Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "b1d5a9ac-1b74-4596-a9c0-b2580af06f8e", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:18:55.915Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "292787c0-4462-4b39-91e3-45bf5e141d1d", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:19:00.956Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "9fa3625e-9c45-4d2a-9768-dec3a02ab5ee", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:19:05.988Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", 
"controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "505920b2-fc94-4cfb-9f04-40db048c011e", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:19:11.049Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "3f01ba14-cc5a-4f37-933b-53e1dca8c832", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:19:16.083Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "fe88e500-6c66-4734-885a-278b54489930", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:19:21.122Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "f5a1abbd-b671-4069-8cb5-e79f752720af", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:19:26.162Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "57826373-1944-45df-b4dc-ca3d7c2b4cb7", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:19:26.228Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "20ec842b-88da-4593-b14c-9deffc530236", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:19:31.219Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "69886aaf-c939-4b00-8e30-60bfd050d45d", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:19:36.260Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": 
"95ce7354-2094-45f7-81ee-3765e4654e7e", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:19:41.300Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "455f7e85-6afc-45c6-9cdf-a77f1401b4f2", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:19:46.338Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "8e9e65f8-c4b7-47b9-9cb4-26d7728adfb3", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:19:51.380Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "5d544391-515c-4f5f-b228-fa68e212e0dd", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:19:51.459Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "21c6e289-c15a-47c3-9414-086d5ee95db1", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:19:56.452Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "62d23379-b442-43eb-ab96-52a6ca6cfe26", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:20:01.505Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "53131fa0-501f-44f9-a4c9-1e8e68c4a5a7", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:20:06.540Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "d7c81d03-2744-44ef-b85d-f968cf5a7019", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:20:11.581Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": 
"PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "9661230a-489a-4af9-8fa0-7f2411ea48a4", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:20:16.617Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "2fecf951-8282-44e9-abe0-154809b621ea", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:20:21.656Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "6d8815fd-aa4b-4402-9602-c8966c107c1c", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:20:26.717Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "f0190345-8b4c-4056-891d-274d83986f0a", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:20:31.756Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "a96af19c-70dc-48c8-b5b0-c0f0ab8a0384", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:20:36.790Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "b1ea1f36-ed0b-4699-a78e-d69cda93aa94", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:20:41.833Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "065df21b-9e17-4f6b-bd43-0e86262501f1", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:20:46.870Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "c9466339-4368-41f3-8770-83bdba11e7df", "cluster": "one-pod", "storage": 
"aws-s3", "allowed": true} 2025-12-04T12:20:51.919Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "a67470f7-ff5f-4618-9b65-d9eb242a472b", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:20:57.316Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "900cbf31-82d4-4971-97dc-6ad696f513d5", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:21:02.351Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "8550df5a-5d78-497f-a5de-cb2321f7f38a", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:21:07.401Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "51b136e6-9e4a-423c-a5b1-17fb37a73467", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:21:12.568Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "b7380414-42f7-4320-b66f-003684a8839b", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:21:17.610Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "ac09714c-f2a7-46ec-9c03-81d374969739", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:21:22.647Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "bd9adf21-4d1e-4ab2-a421-2513d1d687dd", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:21:22.897Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": 
{"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "e65d9553-73ac-4152-9960-cb90bacec1ac", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:21:27.880Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "3004959c-4f81-41d4-888e-f282c4bfd7cb", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:21:32.924Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "fb80703c-1b09-4523-a25f-705497bd70a3", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:21:37.966Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "4e17a7aa-3ee3-49fb-829c-dc968169371b", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:21:43.016Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "71122622-f5d6-45d1-8a27-e649296f4417", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:21:48.072Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "a156cc60-f3bd-4bed-bfac-699b082faeb9", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:21:53.124Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "859a7eb6-6043-480f-bf26-8e26d11a77fa", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:21:58.164Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "f4d70542-18f7-4d4c-92fd-f5553c518e87", "cluster": "one-pod", "storage": "aws-s3", "allowed": true} 2025-12-04T12:22:03.201Z INFO 
2025-12-04T12:22:03.201Z INFO Backup didn't start running in runningDeadlineSeconds, failing the backup {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "7355e9a5-f0fd-4b47-b2e7-ae6ed9de944d", "runningDeadlineSeconds": 300, "passedSeconds": 304.201704442}
2025-12-04T12:22:03.221Z INFO running deadline exceeded, deleting the job and its pods {"controller": "pxcbackup-controller", "controllerGroup": "pxc.percona.com", "controllerKind": "PerconaXtraDBClusterBackup", "PerconaXtraDBClusterBackup": {"name":"on-demand-backup-aws-s3","namespace":"one-pod-31237"}, "namespace": "one-pod-31237", "name": "on-demand-backup-aws-s3", "reconcileID": "7355e9a5-f0fd-4b47-b2e7-ae6ed9de944d", "cluster": "one-pod"}
2025-12-04T12:25:13.721Z INFO controller-runtime.cache Warning: watch ended with error {"reflector": "pkg/mod/k8s.io/client-go@v0.34.2/tools/cache/reflector.go:290", "type": "*v1.Event", "err": "very short watch: pkg/mod/k8s.io/client-go@v0.34.2/tools/cache/reflector.go:290: Unexpected watch close - watch lasted less than a second and no items received"}
max retry count 360 reached. something went wrong with operator or kubernetes cluster
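The two INFO lines mark the reconciler giving up: the job never reached the Running state within runningDeadlineSeconds (300), so after 304.2 seconds the backup is failed and its job and pods are deleted. A hedged sketch of the comparison those log fields suggest, under the same assumptions as the previous sketch (the helper name runningDeadlineExceeded is invented):

    package backup

    import (
    	"time"

    	api "github.com/percona/percona-xtradb-cluster-operator/pkg/apis/pxc/v1"
    )

    // runningDeadlineExceeded (hypothetical) mirrors the shape implied by
    // "runningDeadlineSeconds": 300 and "passedSeconds": 304.2 in the log:
    // a backup that has not reached Running is failed once the time since
    // it was created exceeds the deadline.
    func runningDeadlineExceeded(cr *api.PerconaXtraDBClusterBackup, deadlineSeconds int64) bool {
    	if cr.Status.State == api.BackupRunning || cr.Status.State == api.BackupSucceeded {
    		return false
    	}
    	passed := time.Since(cr.CreationTimestamp.Time).Seconds()
    	return passed > float64(deadlineSeconds)
    }

The "very short watch" warning is client-go's reflector reconnecting a closed watch and is usually transient. The closing "max retry count 360 reached" line appears to come not from the operator but from the e2e harness's own polling loop giving up after 360 attempts, which is what aborts the one-pod-5-7 test.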