Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/logs/demand-backup-flow-control-8-0.log Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 + cluster=some-name + create_infra demand-backup-flow-control-25476 + local ns=demand-backup-flow-control-25476 + '[' -n pxc-operator ']' + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl get pxc --all-namespaces -o wide + grep -v NAMESPACE + kubectl patch pxc -n demand-backup-flow-control-11750 some-name --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/some-name patched + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.HFel1VPOpt ++ mktemp + local LAST_ERR=/tmp/tmp.KC1Uf34nkT + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.HFel1VPOpt perconaxtradbcluster.pxc.percona.com "some-name" deleted from demand-backup-flow-control-11750 namespace + cat /tmp/tmp.KC1Uf34nkT + rm /tmp/tmp.HFel1VPOpt /tmp/tmp.KC1Uf34nkT + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.WlaFikoAnS ++ mktemp + local LAST_ERR=/tmp/tmp.ucH22akfYA + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.WlaFikoAnS perconaxtradbclusterbackup.pxc.percona.com "backup1" deleted from demand-backup-flow-control-11750 namespace perconaxtradbclusterbackup.pxc.percona.com "backup2" deleted from demand-backup-flow-control-11750 namespace + cat /tmp/tmp.ucH22akfYA + rm /tmp/tmp.WlaFikoAnS /tmp/tmp.ucH22akfYA + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.gMyxorO5dh ++ mktemp + local LAST_ERR=/tmp/tmp.hM38XV3w36 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.gMyxorO5dh No resources found + cat /tmp/tmp.hM38XV3w36 + rm /tmp/tmp.gMyxorO5dh /tmp/tmp.hM38XV3w36 + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ helm list --all-namespaces --filter chaos-mesh ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get MutatingWebhookConfiguration + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep validate-auth ++ kubectl get ValidatingWebhookConfiguration + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl api-resources ++ grep chaos-mesh ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + 
: ++ awk '{print $1}' ++ kubectl get clusterrolebinding ++ grep chaos-mesh + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get clusterrole + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator + awk '{print$1}' + xargs kubectl delete ns + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.FLvcZGJgCN + kubectl_bin get ns ++ mktemp + local LAST_OUT=/tmp/tmp.j5IKtLnlwR ++ mktemp + local LAST_ERR=/tmp/tmp.vQKivUCWAQ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns ++ mktemp + local LAST_ERR=/tmp/tmp.p4iNU5DDEo + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.j5IKtLnlwR + cat /tmp/tmp.vQKivUCWAQ + rm /tmp/tmp.j5IKtLnlwR /tmp/tmp.vQKivUCWAQ + return 0 namespace "demand-backup-flow-control-11750" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.FLvcZGJgCN namespace "pxc-operator" deleted + cat /tmp/tmp.p4iNU5DDEo + rm /tmp/tmp.FLvcZGJgCN /tmp/tmp.p4iNU5DDEo + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.ijhN1CsQEe ++ mktemp + local LAST_ERR=/tmp/tmp.JU2unXz5pD + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ijhN1CsQEe namespace/pxc-operator created + cat /tmp/tmp.JU2unXz5pD + rm /tmp/tmp.ijhN1CsQEe /tmp/tmp.JU2unXz5pD + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.6oT5S8Va9i +++ mktemp ++ local LAST_ERR=/tmp/tmp.v3w24Y6OOC ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.6oT5S8Va9i ++ cat /tmp/tmp.v3w24Y6OOC ++ rm /tmp/tmp.6oT5S8Va9i /tmp/tmp.v3w24Y6OOC ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2234-269f3694-3-cluster6 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.rxO1pbYegj ++ mktemp + local LAST_ERR=/tmp/tmp.LNZ2drXMwf + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context 
gke_cloud-dev-112233_us-central1-a_jen-pxc-2234-269f3694-3-cluster6 --namespace=pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.rxO1pbYegj Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2234-269f3694-3-cluster6" modified. + cat /tmp/tmp.LNZ2drXMwf + rm /tmp/tmp.rxO1pbYegj /tmp/tmp.LNZ2drXMwf + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.x4owK249VE ++ mktemp + local LAST_ERR=/tmp/tmp.cURbwlPlLo + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.x4owK249VE customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.cURbwlPlLo + rm /tmp/tmp.x4owK249VE /tmp/tmp.cURbwlPlLo + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.EOyMN1nG9O + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/deploy/cw-rbac.yaml ++ mktemp + local LAST_ERR=/tmp/tmp.7NwmwDePOk + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.EOyMN1nG9O clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.7NwmwDePOk + rm /tmp/tmp.EOyMN1nG9O /tmp/tmp.7NwmwDePOk + return 0 + kubectl_bin apply -f - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2234-269f3694^' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/deploy/cw-operator.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.E9T5zd6l7d ++ mktemp + local LAST_ERR=/tmp/tmp.FnzsXXdkUa + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.E9T5zd6l7d deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.FnzsXXdkUa + rm /tmp/tmp.E9T5zd6l7d /tmp/tmp.FnzsXXdkUa + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l 
app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + local LAST_OUT=/tmp/tmp.YfRChVx71y ++ mktemp + local LAST_ERR=/tmp/tmp.7QWc6ofrPo + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.YfRChVx71y pod/percona-xtradb-cluster-operator-b5f9c4897-blml5 condition met + cat /tmp/tmp.7QWc6ofrPo + rm /tmp/tmp.YfRChVx71y /tmp/tmp.7QWc6ofrPo + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.C08npcXox7 +++ mktemp ++ local LAST_ERR=/tmp/tmp.zHidGwuhtz ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.C08npcXox7 ++ cat /tmp/tmp.zHidGwuhtz ++ rm /tmp/tmp.C08npcXox7 /tmp/tmp.zHidGwuhtz ++ return 0 + wait_pod percona-xtradb-cluster-operator-b5f9c4897-blml5 480 pxc-operator + local pod=percona-xtradb-cluster-operator-b5f9c4897-blml5 + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-b5f9c4897-blml5 ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-b5f9c4897-blml5 condition met waiting for pod/percona-xtradb-cluster-operator-b5f9c4897-blml5 to become Ready.Ok + sleep 3 + create_namespace demand-backup-flow-control-25476 + local namespace=demand-backup-flow-control-25476 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ awk '{print $1}' ++ grep validate-auth + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ grep chaos-mesh ++ kubectl get clusterrolebinding + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk 
'{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + '[' -n '' ']' + desc 'cleaned up old namespaces demand-backup-flow-control-25476' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces demand-backup-flow-control-25476 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace demand-backup-flow-control-25476 + awk '{print$1}' + xargs kubectl delete ns ++ mktemp + local LAST_OUT=/tmp/tmp.vdLXbzd7wY ++ mktemp + local LAST_ERR=/tmp/tmp.nYSFd6xYwj + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace demand-backup-flow-control-25476 + egrep -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp + local LAST_OUT=/tmp/tmp.tOZNUzPNLT ++ mktemp + local LAST_ERR=/tmp/tmp.NUnFb8ZGHD + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.tOZNUzPNLT + cat /tmp/tmp.NUnFb8ZGHD + rm /tmp/tmp.tOZNUzPNLT /tmp/tmp.NUnFb8ZGHD + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + return 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace demand-backup-flow-control-25476 error: resource(s) were provided, but no name was specified + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace demand-backup-flow-control-25476 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.vdLXbzd7wY + cat /tmp/tmp.nYSFd6xYwj Error from server (NotFound): namespaces "demand-backup-flow-control-25476" not found + rm /tmp/tmp.vdLXbzd7wY /tmp/tmp.nYSFd6xYwj + return 1 + : + wait_for_delete namespace/demand-backup-flow-control-25476 + local res=namespace/demand-backup-flow-control-25476 + echo -n 'waiting for namespace/demand-backup-flow-control-25476 to be deleted' waiting for namespace/demand-backup-flow-control-25476 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "demand-backup-flow-control-25476" not found + desc 'create namespace demand-backup-flow-control-25476' + set +o xtrace ----------------------------------------------------------------------------------- create namespace demand-backup-flow-control-25476 ----------------------------------------------------------------------------------- + kubectl_bin create namespace demand-backup-flow-control-25476 ++ mktemp + local LAST_OUT=/tmp/tmp.LqIX3oD80G ++ mktemp + local LAST_ERR=/tmp/tmp.lTevvfKFNL + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace demand-backup-flow-control-25476 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.LqIX3oD80G namespace/demand-backup-flow-control-25476 created + cat /tmp/tmp.lTevvfKFNL + rm /tmp/tmp.LqIX3oD80G /tmp/tmp.lTevvfKFNL + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.elctoab2hU +++ mktemp ++ local LAST_ERR=/tmp/tmp.ckxGED2Iu6 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 
'!=' 0 ']' ++ break ++ cat /tmp/tmp.elctoab2hU ++ cat /tmp/tmp.ckxGED2Iu6 ++ rm /tmp/tmp.elctoab2hU /tmp/tmp.ckxGED2Iu6 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2234-269f3694-3-cluster6 --namespace=demand-backup-flow-control-25476 ++ mktemp + local LAST_OUT=/tmp/tmp.KpI2Jr5jOO ++ mktemp + local LAST_ERR=/tmp/tmp.nzkqm42BtC + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2234-269f3694-3-cluster6 --namespace=demand-backup-flow-control-25476 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.KpI2Jr5jOO Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2234-269f3694-3-cluster6" modified. + cat /tmp/tmp.nzkqm42BtC + rm /tmp/tmp.KpI2Jr5jOO /tmp/tmp.nzkqm42BtC + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.LoQ4Yv1O89 ++ mktemp + local LAST_ERR=/tmp/tmp.dv94o2ddlz + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.LoQ4Yv1O89 secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.dv94o2ddlz + rm /tmp/tmp.LoQ4Yv1O89 /tmp/tmp.dv94o2ddlz + return 0 + start_minio + deploy_helm demand-backup-flow-control-25476 + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" already exists with the same configuration, skipping + helm repo add minio https://charts.min.io/ "minio" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "hashicorp" chart repository Update Complete. 
⎈Happy Helming!⎈ + local cert_secret= + local endpoint=http://minio-service:9000 + minio_args=(--version $MINIO_VER --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set "users[0].accessKey=some-access-key" --set "users[0].secretKey=some-secret-key" --set "users[0].policy=consoleAdmin" --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G) + local minio_args + [[ -n '' ]] + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G minio/minio NAME: minio-service LAST DEPLOYED: Wed Nov 12 10:27:58 2025 NAMESPACE: demand-backup-flow-control-25476 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.demand-backup-flow-control-25476.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace demand-backup-flow-control-25476 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace demand-backup-flow-control-25476 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace demand-backup-flow-control-25476 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace demand-backup-flow-control-25476 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. 
mc ls minio-service-local + sleep 30 ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.E3hkRihuIP +++ mktemp ++ local LAST_ERR=/tmp/tmp.NHy7ZFOH1G ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.E3hkRihuIP ++ cat /tmp/tmp.NHy7ZFOH1G ++ rm /tmp/tmp.E3hkRihuIP /tmp/tmp.NHy7ZFOH1G ++ return 0 + MINIO_POD=minio-service-55fcc5d75f-rzt5s + wait_pod minio-service-55fcc5d75f-rzt5s + local pod=minio-service-55fcc5d75f-rzt5s + local max_retry=480 + local ns= ++ egrep '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ echo minio-service-55fcc5d75f-rzt5s + local container= + set +o xtrace pod/minio-service-55fcc5d75f-rzt5s condition met waiting for pod/minio-service-55fcc5d75f-rzt5s to become Ready.Ok + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 --no-verify-ssl s3 mb s3://operator-testing ++ mktemp + local LAST_OUT=/tmp/tmp.MOOGFJbu4C ++ mktemp + local LAST_ERR=/tmp/tmp.3Sh6TBEELC + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 --no-verify-ssl s3 mb s3://operator-testing + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.MOOGFJbu4C make_bucket: operator-testing pod "aws-cli" deleted from demand-backup-flow-control-25476 namespace + cat /tmp/tmp.3Sh6TBEELC + rm /tmp/tmp.MOOGFJbu4C /tmp/tmp.3Sh6TBEELC + return 0 + log 'creating PXC client' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2025-11-12T10:29:08+0000]' creating PXC client [2025-11-12T10:29:08+0000] creating PXC client + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/client.yml ++ mktemp + local LAST_OUT=/tmp/tmp.8rkoxONoN8 ++ mktemp + local LAST_ERR=/tmp/tmp.dqM3IJ6RYV + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/client.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.8rkoxONoN8 deployment.apps/pxc-client created + cat /tmp/tmp.dqM3IJ6RYV + rm /tmp/tmp.8rkoxONoN8 /tmp/tmp.dqM3IJ6RYV + return 0 + log 'creating cluster secrets' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2025-11-12T10:29:10+0000]' creating cluster secrets [2025-11-12T10:29:10+0000] creating cluster secrets + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.mHGir7X7if ++ mktemp + local LAST_ERR=/tmp/tmp.7cZbm89Pa1 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.mHGir7X7if secret/my-cluster-secrets created secret/some-name-ssl created secret/some-name-ssl-internal created + cat /tmp/tmp.7cZbm89Pa1 + rm /tmp/tmp.mHGir7X7if /tmp/tmp.7cZbm89Pa1 + return 0 + log 'create PXC cluster: some-name' ++ date 
+%Y-%m-%dT%H:%M:%S%z + echo '[2025-11-12T10:29:12+0000]' create PXC cluster: some-name [2025-11-12T10:29:12+0000] create PXC cluster: some-name + [[ perconalab/percona-xtradb-cluster-operator:main-pxc8.0 =~ 5\.7 ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/demand-backup-flow-control/conf/cr.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/demand-backup-flow-control/conf/cr.yml + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/demand-backup-flow-control/conf/cr.yml + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2234-269f3694#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.demand-backup-flow-control-25476~ + kubectl_bin apply -f - + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc8.0-backup#' ++ mktemp + local LAST_OUT=/tmp/tmp.B4eda4BTAO + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' ++ mktemp + local LAST_ERR=/tmp/tmp.EC0L0wGc6F + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.B4eda4BTAO perconaxtradbcluster.pxc.percona.com/some-name created + cat /tmp/tmp.EC0L0wGc6F + rm /tmp/tmp.B4eda4BTAO /tmp/tmp.EC0L0wGc6F + return 0 + wait_cluster_consistency some-name 3 2 + local cluster_name=some-name + local cluster_size=3 + local proxy_size=2 + '[' -z 2 ']' + desc 'wait cluster consistency' + set +o xtrace ----------------------------------------------------------------------------------- wait cluster consistency ----------------------------------------------------------------------------------- + local i=0 + local max=300 + sleep 7 + echo -n 'waiting for pxc/some-name to be ready' waiting for pxc/some-name to be ready++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.Qx2PrgsGyy +++ mktemp ++ local LAST_ERR=/tmp/tmp.Llkdru5SLW ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Qx2PrgsGyy ++ cat /tmp/tmp.Llkdru5SLW ++ rm /tmp/tmp.Qx2PrgsGyy /tmp/tmp.Llkdru5SLW ++ return 0 + [[ initializing == \r\e\a\d\y ]] + echo -n . 
.+ sleep 5 + [[ 34 -ge 300 ]] + let i+=1 ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.state}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.mQOWGZjEyZ +++ mktemp ++ local LAST_ERR=/tmp/tmp.a372srebHd ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.state}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.mQOWGZjEyZ ++ cat /tmp/tmp.a372srebHd ++ rm /tmp/tmp.mQOWGZjEyZ /tmp/tmp.a372srebHd ++ return 0 + [[ ready == \r\e\a\d\y ]] ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.pxc.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.eKzY5N48gF +++ mktemp ++ local LAST_ERR=/tmp/tmp.KpUjBxLN38 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.pxc.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.eKzY5N48gF ++ cat /tmp/tmp.KpUjBxLN38 ++ rm /tmp/tmp.eKzY5N48gF /tmp/tmp.KpUjBxLN38 ++ return 0 + [[ 3 == \3 ]] +++ get_proxy_engine some-name +++ local cluster_name=some-name ++++ get_proxy some-name ++++ local target_cluster=some-name +++++ kubectl_bin get pxc some-name -o 'jsonpath={.spec.haproxy.enabled}' ++++++ mktemp +++++ local LAST_OUT=/tmp/tmp.DTZwPqMasR ++++++ mktemp +++++ local LAST_ERR=/tmp/tmp.8o1MWD43kz +++++ local exit_status=0 ++++++ seq 0 2 +++++ for i in '$(seq 0 2)' +++++ set +e +++++ kubectl get pxc some-name -o 'jsonpath={.spec.haproxy.enabled}' +++++ exit_status=0 +++++ set -e +++++ '[' 0 '!=' 0 ']' +++++ break +++++ cat /tmp/tmp.DTZwPqMasR +++++ cat /tmp/tmp.8o1MWD43kz +++++ rm /tmp/tmp.DTZwPqMasR /tmp/tmp.8o1MWD43kz +++++ return 0 ++++ [[ true == \t\r\u\e ]] ++++ echo some-name-haproxy ++++ return +++ local cluster_proxy=some-name-haproxy +++ echo haproxy ++ kubectl_bin get pxc some-name -o 'jsonpath={.status.haproxy.ready}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.0OwVw9jsEg +++ mktemp ++ local LAST_ERR=/tmp/tmp.L8v6ED1v3M ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc some-name -o 'jsonpath={.status.haproxy.ready}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.0OwVw9jsEg ++ cat /tmp/tmp.L8v6ED1v3M ++ rm /tmp/tmp.0OwVw9jsEg /tmp/tmp.L8v6ED1v3M ++ return 0 + [[ 2 == \2 ]] + echo + desc 'CASE 1: startingDeadlineSeconds' + set +o xtrace ----------------------------------------------------------------------------------- CASE 1: startingDeadlineSeconds ----------------------------------------------------------------------------------- + log 'setting startingDeadlineSeconds to 20' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2025-11-12T10:33:08+0000]' setting startingDeadlineSeconds to 20 [2025-11-12T10:33:08+0000] setting startingDeadlineSeconds to 20 + kubectl_bin patch pxc some-name --type=merge -p '{"spec": {"backup": {"startingDeadlineSeconds": 20}}}' ++ mktemp + local LAST_OUT=/tmp/tmp.T6UW5poa3f ++ mktemp + local LAST_ERR=/tmp/tmp.RPCP9Ovtp3 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc some-name --type=merge -p '{"spec": {"backup": {"startingDeadlineSeconds": 20}}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.T6UW5poa3f perconaxtradbcluster.pxc.percona.com/some-name patched + cat /tmp/tmp.RPCP9Ovtp3 + rm /tmp/tmp.T6UW5poa3f /tmp/tmp.RPCP9Ovtp3 + return 0 + log 'create dummy lock to block backup' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2025-11-12T10:33:10+0000]' create dummy lock to block backup [2025-11-12T10:33:10+0000] create dummy lock to block backup + 
kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/demand-backup-flow-control/conf/lease.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.URTKm7gzgd ++ mktemp + local LAST_ERR=/tmp/tmp.003qro1Ew2 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/demand-backup-flow-control/conf/lease.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.URTKm7gzgd lease.coordination.k8s.io/pxc-some-name-backup-lock created + cat /tmp/tmp.003qro1Ew2 + rm /tmp/tmp.URTKm7gzgd /tmp/tmp.003qro1Ew2 + return 0 + log 'creating pxc-backup/backup1' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2025-11-12T10:33:12+0000]' creating pxc-backup/backup1 [2025-11-12T10:33:12+0000] creating pxc-backup/backup1 + run_backup backup1 + local name=backup1 + yq eval '.metadata.name = "backup1"' /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/demand-backup-flow-control/conf/backup.yml + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.WIZBdl9SRB ++ mktemp + local LAST_ERR=/tmp/tmp.WCWpVVwVQk + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.WIZBdl9SRB perconaxtradbclusterbackup.pxc.percona.com/backup1 created + cat /tmp/tmp.WCWpVVwVQk + rm /tmp/tmp.WIZBdl9SRB /tmp/tmp.WCWpVVwVQk + return 0 + wait_backup backup1 Failed + local backup=backup1 + local status=Failed + set +o xtrace waiting for pxc-backup/backup1 to reach Failed state............Failed + log 'operator should fail backup' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2025-11-12T10:33:37+0000]' operator should fail backup [2025-11-12T10:33:37+0000] operator should fail backup + check_backup_error backup1 'starting deadline seconds exceeded' + local name=backup1 + local 'expected=starting deadline seconds exceeded' + grep 'starting deadline seconds exceeded' + kubectl_bin get pxc-backup backup1 -o yaml + yq .status.error ++ mktemp + local LAST_OUT=/tmp/tmp.KD38ZeoqrP ++ mktemp + local LAST_ERR=/tmp/tmp.onZm5ftEq3 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pxc-backup backup1 -o yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.KD38ZeoqrP + cat /tmp/tmp.onZm5ftEq3 + rm /tmp/tmp.KD38ZeoqrP /tmp/tmp.onZm5ftEq3 + return 0 starting deadline seconds exceeded + log 'operator successfully failed the backup job' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2025-11-12T10:33:37+0000]' operator successfully failed the backup job [2025-11-12T10:33:37+0000] operator successfully failed the backup job + kubectl_bin delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/demand-backup-flow-control/conf/lease.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.rk1V4dkJNQ ++ mktemp + local LAST_ERR=/tmp/tmp.4i6NhSQ4K8 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/demand-backup-flow-control/conf/lease.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.rk1V4dkJNQ lease.coordination.k8s.io "pxc-some-name-backup-lock" deleted from demand-backup-flow-control-25476 namespace + cat /tmp/tmp.4i6NhSQ4K8 + rm /tmp/tmp.rk1V4dkJNQ /tmp/tmp.4i6NhSQ4K8 + return 0 + desc 'CASE 1: startingDeadlineSeconds PASSED' + set +o xtrace ----------------------------------------------------------------------------------- CASE 1: startingDeadlineSeconds PASSED 
----------------------------------------------------------------------------------- + desc 'CASE 2: suspend and resume' + set +o xtrace ----------------------------------------------------------------------------------- CASE 2: suspend and resume ----------------------------------------------------------------------------------- + log 'setting suspendedDeadlineSeconds to 300' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2025-11-12T10:33:39+0000]' setting suspendedDeadlineSeconds to 300 [2025-11-12T10:33:39+0000] setting suspendedDeadlineSeconds to 300 + kubectl_bin patch pxc some-name --type=merge -p '{"spec": {"backup": {"startingDeadlineSeconds": 1800, "suspendedDeadlineSeconds": 300}}}' ++ mktemp + local LAST_OUT=/tmp/tmp.vz2XIswfRs ++ mktemp + local LAST_ERR=/tmp/tmp.ENgbxexWM0 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl patch pxc some-name --type=merge -p '{"spec": {"backup": {"startingDeadlineSeconds": 1800, "suspendedDeadlineSeconds": 300}}}' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.vz2XIswfRs perconaxtradbcluster.pxc.percona.com/some-name patched + cat /tmp/tmp.ENgbxexWM0 + rm /tmp/tmp.vz2XIswfRs /tmp/tmp.ENgbxexWM0 + return 0 + log 'creating pxc-backup/backup2' ++ date +%Y-%m-%dT%H:%M:%S%z + echo '[2025-11-12T10:33:40+0000]' creating pxc-backup/backup2 [2025-11-12T10:33:40+0000] creating pxc-backup/backup2 + run_backup backup2 + local name=backup2 + kubectl_bin apply -f - + yq eval '.metadata.name = "backup2"' /mnt/jenkins/workspace/cloud-pxc-operator_PR-2234/e2e-tests/demand-backup-flow-control/conf/backup.yml ++ mktemp + local LAST_OUT=/tmp/tmp.771cvkRPmz ++ mktemp + local LAST_ERR=/tmp/tmp.RIpffnc5jE + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.771cvkRPmz perconaxtradbclusterbackup.pxc.percona.com/backup2 created + cat /tmp/tmp.RIpffnc5jE + rm /tmp/tmp.771cvkRPmz /tmp/tmp.RIpffnc5jE + return 0 + wait_backup backup2 Running + local backup=backup2 + local status=Running + set +o xtrace waiting for pxc-backup/backup2 to reach Running state........................................................................................................................................................................................................................................................................................................................................................................2025-11-12T10:27:12.459Z INFO setup Runs on {"platform": "kubernetes", "version": "v1.31.13-gke.1231000"} 2025-11-12T10:27:12.459Z INFO setup Manager starting up {"gitCommit": "269f3694d9306fde7f2ffd7864a3f22b0ef247c5", "gitBranch": "PR-2234-269f3694", "buildTime": "2025-11-12T09:55:34Z", "goVersion": "go1.25.4", "os": "linux", "arch": "amd64"} 2025-11-12T10:27:12.462Z INFO setup Registering Components. 2025-11-12T10:27:12.937Z INFO controller-runtime.webhook Registering webhook {"path": "/validate-percona-xtradbcluster"} 2025-11-12T10:27:12.937Z INFO setup Starting the Cmd. 
2025-11-12T10:27:12.938Z INFO controller-runtime.metrics Starting metrics server 2025-11-12T10:27:12.938Z INFO controller-runtime.metrics Serving metrics server {"bindAddress": ":8080", "secure": false} 2025-11-12T10:27:12.938Z INFO starting server {"name": "health probe", "addr": "[::]:8081"} 2025-11-12T10:27:12.938Z INFO controller-runtime.webhook Starting webhook server 2025-11-12T10:27:12.938Z INFO controller-runtime.certwatcher Updated current TLS certificate {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key"} 2025-11-12T10:27:12.938Z INFO controller-runtime.webhook Serving webhook server {"host": "", "port": 9443} 2025-11-12T10:27:12.939Z INFO controller-runtime.certwatcher Starting certificate poll+watcher {"cert": "/tmp/k8s-webhook-server/serving-certs/tls.crt", "key": "/tmp/k8s-webhook-server/serving-certs/tls.key", "interval": "10s"} 2025-11-12T10:27:13.039Z INFO attempting to acquire leader lease pxc-operator/08db1feb.percona.com... 2025-11-12T10:27:13.086Z INFO successfully acquired lease pxc-operator/08db1feb.percona.com 2025-11-12T10:27:13.086Z DEBUG events percona-xtradb-cluster-operator-b5f9c4897-blml5_351d0d4c-a9f9-47d8-9bb8-1657828656e7 became leader {"type": "Normal", "object": {"kind":"Lease","namespace":"pxc-operator","name":"08db1feb.percona.com","uid":"9b051163-f917-474c-b4b1-818e3292b129","apiVersion":"coordination.k8s.io/v1","resourceVersion":"1762943233082063009"}, "reason": "LeaderElection"} 2025-11-12T10:27:13.087Z INFO Starting EventSource {"controller": "pxc-controller", "source": "kind source: *v1.Secret"} 2025-11-12T10:27:13.087Z INFO Starting EventSource {"controller": "pxc-controller", "source": "kind source: *v1.PerconaXtraDBCluster"} 2025-11-12T10:27:13.087Z INFO Starting EventSource {"controller": "pxcrestore-controller", "source": "kind source: *v1.PerconaXtraDBClusterRestore"} 2025-11-12T10:27:13.087Z INFO Starting EventSource {"controller": "pxcbackup-controller", "source": "kind source: *v1.PerconaXtraDBClusterBackup"} 2025-11-12T10:27:13.187Z INFO Starting Controller {"controller": "pxcrestore-controller"} 2025-11-12T10:27:13.187Z INFO Starting workers {"controller": "pxcrestore-controller", "worker count": 1} 2025-11-12T10:27:13.187Z INFO Starting Controller {"controller": "pxc-controller"} 2025-11-12T10:27:13.187Z INFO Starting workers {"controller": "pxc-controller", "worker count": 1} 2025-11-12T10:27:13.187Z INFO Starting Controller {"controller": "pxcbackup-controller"} 2025-11-12T10:27:13.187Z INFO Starting workers {"controller": "pxcbackup-controller", "worker count": 1} 2025-11-12T10:29:15.441Z INFO Set CR version {"controller": "pxc-controller", "namespace": "demand-backup-flow-control-25476", "name": "some-name", "reconcileID": "aadc682a-92e1-4744-8e54-688fb3305f49", "version": "1.19.0"} 2025-11-12T10:29:15.766Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "demand-backup-flow-control-25476", "name": "some-name", "reconcileID": "aadc682a-92e1-4744-8e54-688fb3305f49", "object": "auto-some-name-pxc", "kind": "&TypeMeta{Kind:ConfigMap,APIVersion:v1,}"} 2025-11-12T10:29:15.784Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "demand-backup-flow-control-25476", "name": "some-name", "reconcileID": "aadc682a-92e1-4744-8e54-688fb3305f49", "object": "some-name-pxc", "kind": "&TypeMeta{Kind:ConfigMap,APIVersion:v1,}"} 2025-11-12T10:29:15.900Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "demand-backup-flow-control-25476", 
"name": "some-name", "reconcileID": "aadc682a-92e1-4744-8e54-688fb3305f49", "object": "some-name-haproxy", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"} 2025-11-12T10:29:15.964Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "demand-backup-flow-control-25476", "name": "some-name", "reconcileID": "aadc682a-92e1-4744-8e54-688fb3305f49", "object": "some-name-pxc", "kind": "&TypeMeta{Kind:StatefulSet,APIVersion:apps/v1,}"} 2025-11-12T10:29:16.012Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "demand-backup-flow-control-25476", "name": "some-name", "reconcileID": "aadc682a-92e1-4744-8e54-688fb3305f49", "object": "some-name-pxc", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2025-11-12T10:29:16.043Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "demand-backup-flow-control-25476", "name": "some-name", "reconcileID": "aadc682a-92e1-4744-8e54-688fb3305f49", "object": "some-name-pxc-unready", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2025-11-12T10:29:16.099Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "demand-backup-flow-control-25476", "name": "some-name", "reconcileID": "aadc682a-92e1-4744-8e54-688fb3305f49", "object": "some-name-haproxy", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2025-11-12T10:29:16.363Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "demand-backup-flow-control-25476", "name": "some-name", "reconcileID": "aadc682a-92e1-4744-8e54-688fb3305f49", "object": "some-name-haproxy-replicas", "kind": "&TypeMeta{Kind:Service,APIVersion:v1,}"} 2025-11-12T10:29:17.207Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "demand-backup-flow-control-25476", "name": "some-name", "reconcileID": "4e4b222a-ef13-4334-9f04-1491e75e19d9", "object": "some-name-pxc", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2025-11-12T10:29:17.224Z DEBUG Creating object {"controller": "pxc-controller", "namespace": "demand-backup-flow-control-25476", "name": "some-name", "reconcileID": "4e4b222a-ef13-4334-9f04-1491e75e19d9", "object": "some-name-haproxy", "kind": "&TypeMeta{Kind:PodDisruptionBudget,APIVersion:policy/v1,}"} 2025-11-12T10:30:33.529Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "demand-backup-flow-control-25476", "name": "some-name", "reconcileID": "f595f3ca-26f7-45b6-a028-8bf9149a1faa", "user": "operator"} 2025-11-12T10:30:33.558Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "demand-backup-flow-control-25476", "name": "some-name", "reconcileID": "f595f3ca-26f7-45b6-a028-8bf9149a1faa", "user": "monitor"} 2025-11-12T10:30:33.623Z INFO User monitor: granted privileges {"controller": "pxc-controller", "namespace": "demand-backup-flow-control-25476", "name": "some-name", "reconcileID": "f595f3ca-26f7-45b6-a028-8bf9149a1faa"} 2025-11-12T10:30:33.661Z INFO monitor user privileges granted {"controller": "pxc-controller", "namespace": "demand-backup-flow-control-25476", "name": "some-name", "reconcileID": "f595f3ca-26f7-45b6-a028-8bf9149a1faa"} 2025-11-12T10:30:33.694Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "demand-backup-flow-control-25476", "name": "some-name", "reconcileID": "f595f3ca-26f7-45b6-a028-8bf9149a1faa", "user": "xtrabackup"} 2025-11-12T10:30:33.734Z INFO User xtrabackup: granted privileges {"controller": "pxc-controller", "namespace": "demand-backup-flow-control-25476", "name": 
"some-name", "reconcileID": "f595f3ca-26f7-45b6-a028-8bf9149a1faa"} 2025-11-12T10:30:33.769Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "demand-backup-flow-control-25476", "name": "some-name", "reconcileID": "f595f3ca-26f7-45b6-a028-8bf9149a1faa", "user": "replication"} 2025-11-12T10:32:58.936Z INFO Password expiration policy updated {"controller": "pxc-controller", "namespace": "demand-backup-flow-control-25476", "name": "some-name", "reconcileID": "76f57499-77dc-4d43-8d44-863ee3a8e42b", "user": "root"} 2025-11-12T10:32:59.081Z INFO update PXC version (fetched from db) {"controller": "pxc-controller", "namespace": "demand-backup-flow-control-25476", "name": "some-name", "reconcileID": "76f57499-77dc-4d43-8d44-863ee3a8e42b", "new version": "8.0.43-34.1"} 2025-11-12T10:33:15.584Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "namespace": "demand-backup-flow-control-25476", "name": "backup1", "reconcileID": "0927032c-7a21-45a4-8955-08492b482798", "cluster": "some-name", "storage": "minio", "allowed": false} 2025-11-12T10:33:15.685Z INFO Another backup is holding the lock {"controller": "pxcbackup-controller", "namespace": "demand-backup-flow-control-25476", "name": "backup1", "reconcileID": "0927032c-7a21-45a4-8955-08492b482798", "cluster": "some-name", "storage": "minio", "holder": "dummy-backup"} 2025-11-12T10:33:15.685Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "namespace": "demand-backup-flow-control-25476", "name": "backup1", "reconcileID": "b2774a7b-992a-44d3-805f-170a04d2cb39", "cluster": "some-name", "storage": "minio", "allowed": false} 2025-11-12T10:33:15.685Z INFO Another backup is holding the lock {"controller": "pxcbackup-controller", "namespace": "demand-backup-flow-control-25476", "name": "backup1", "reconcileID": "b2774a7b-992a-44d3-805f-170a04d2cb39", "cluster": "some-name", "storage": "minio", "holder": "dummy-backup"} 2025-11-12T10:33:20.685Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "namespace": "demand-backup-flow-control-25476", "name": "backup1", "reconcileID": "5126b346-2486-44f8-b9df-0e2d7b7b6125", "cluster": "some-name", "storage": "minio", "allowed": false} 2025-11-12T10:33:20.685Z INFO Another backup is holding the lock {"controller": "pxcbackup-controller", "namespace": "demand-backup-flow-control-25476", "name": "backup1", "reconcileID": "5126b346-2486-44f8-b9df-0e2d7b7b6125", "cluster": "some-name", "storage": "minio", "holder": "dummy-backup"} 2025-11-12T10:33:25.686Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "namespace": "demand-backup-flow-control-25476", "name": "backup1", "reconcileID": "7d832626-6066-4241-b9b2-00912928a882", "cluster": "some-name", "storage": "minio", "allowed": false} 2025-11-12T10:33:25.686Z INFO Another backup is holding the lock {"controller": "pxcbackup-controller", "namespace": "demand-backup-flow-control-25476", "name": "backup1", "reconcileID": "7d832626-6066-4241-b9b2-00912928a882", "cluster": "some-name", "storage": "minio", "holder": "dummy-backup"} 2025-11-12T10:33:30.687Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "namespace": "demand-backup-flow-control-25476", "name": "backup1", "reconcileID": "2b8915c8-3185-4d72-8508-5e705086f48d", "cluster": "some-name", "storage": "minio", "allowed": false} 2025-11-12T10:33:30.687Z INFO Another backup is holding the lock {"controller": 
"pxcbackup-controller", "namespace": "demand-backup-flow-control-25476", "name": "backup1", "reconcileID": "2b8915c8-3185-4d72-8508-5e705086f48d", "cluster": "some-name", "storage": "minio", "holder": "dummy-backup"} 2025-11-12T10:33:35.687Z INFO Backup didn't start in startingDeadlineSeconds, failing the backup {"controller": "pxcbackup-controller", "namespace": "demand-backup-flow-control-25476", "name": "backup1", "reconcileID": "a026c9ec-1bd2-421c-aa95-3e5d2594cf3d", "startingDeadlineSeconds": 20, "passedSeconds": 20.68769242} 2025-11-12T10:33:35.714Z INFO Releasing backup lock {"controller": "pxcbackup-controller", "namespace": "demand-backup-flow-control-25476", "name": "backup1", "reconcileID": "a026c9ec-1bd2-421c-aa95-3e5d2594cf3d", "cluster": "some-name", "lease": "pxc-some-name-backup-lock"} 2025-11-12T10:33:35.714Z ERROR failed to release the lock {"controller": "pxcbackup-controller", "namespace": "demand-backup-flow-control-25476", "name": "backup1", "reconcileID": "a026c9ec-1bd2-421c-aa95-3e5d2594cf3d", "cluster": "some-name", "error": "not the holder", "errorVerbose": "not the holder\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/k8s.init\n\t:1\nruntime.doInit1\n\t/usr/local/go/src/runtime/proc.go:7670\nruntime.doInit\n\t/usr/local/go/src/runtime/proc.go:7637\nruntime.main\n\t/usr/local/go/src/runtime/proc.go:256\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693"} github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxcbackup.(*ReconcilePerconaXtraDBClusterBackup).Reconcile.func2 /go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxcbackup/controller.go:215 github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxcbackup.(*ReconcilePerconaXtraDBClusterBackup).Reconcile /go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxcbackup/controller.go:306 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.4/pkg/internal/controller/controller.go:216 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.4/pkg/internal/controller/controller.go:461 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.4/pkg/internal/controller/controller.go:421 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.4/pkg/internal/controller/controller.go:296 2025-11-12T10:33:43.030Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "namespace": "demand-backup-flow-control-25476", "name": "backup2", "reconcileID": "b730b09e-70db-4413-8e2b-92afc7566e17", "cluster": "some-name", "storage": "minio", "allowed": false} 2025-11-12T10:33:43.091Z INFO Created a new backup job {"controller": "pxcbackup-controller", "namespace": "demand-backup-flow-control-25476", "name": "backup2", "reconcileID": "b730b09e-70db-4413-8e2b-92afc7566e17", "namespace": "demand-backup-flow-control-25476", "name": "xb-backup2"} 2025-11-12T10:33:43.091Z DEBUG Check if parallel backups are allowed {"controller": "pxcbackup-controller", "namespace": "demand-backup-flow-control-25476", "name": "backup2", "reconcileID": "a708c56c-768d-474a-aa6c-6dfac2f2073a", "cluster": "some-name", "storage": "minio", "allowed": false} 2025-11-12T10:33:43.147Z INFO Releasing backup lock {"controller": 
"pxcbackup-controller", "namespace": "demand-backup-flow-control-25476", "name": "backup2", "reconcileID": "a708c56c-768d-474a-aa6c-6dfac2f2073a", "cluster": "some-name", "storage": "minio", "lease": "pxc-some-name-backup-lock"} 2025-11-12T10:33:43.185Z ERROR Reconciler error {"controller": "pxcbackup-controller", "namespace": "demand-backup-flow-control-25476", "name": "backup2", "reconcileID": "a708c56c-768d-474a-aa6c-6dfac2f2073a", "error": "create backup job: create backup job: jobs.batch \"xb-backup2\" already exists", "errorVerbose": "jobs.batch \"xb-backup2\" already exists\ncreate backup job\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxcbackup.(*ReconcilePerconaXtraDBClusterBackup).createBackupJob\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxcbackup/controller.go:383\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxcbackup.(*ReconcilePerconaXtraDBClusterBackup).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxcbackup/controller.go:291\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.4/pkg/internal/controller/controller.go:216\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.4/pkg/internal/controller/controller.go:461\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.4/pkg/internal/controller/controller.go:421\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.4/pkg/internal/controller/controller.go:296\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693\ncreate backup job\ngithub.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxcbackup.(*ReconcilePerconaXtraDBClusterBackup).Reconcile\n\t/go/src/github.com/percona/percona-xtradb-cluster-operator/pkg/controller/pxcbackup/controller.go:293\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Reconcile\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.4/pkg/internal/controller/controller.go:216\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.4/pkg/internal/controller/controller.go:461\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.4/pkg/internal/controller/controller.go:421\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1\n\t/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.4/pkg/internal/controller/controller.go:296\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:1693"} sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).reconcileHandler /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.4/pkg/internal/controller/controller.go:474 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).processNextWorkItem /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.4/pkg/internal/controller/controller.go:421 sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller[...]).Start.func1.1 /go/pkg/mod/sigs.k8s.io/controller-runtime@v0.22.4/pkg/internal/controller/controller.go:296 max retry count 360 reached. 
something went wrong with operator or kubernetes cluster
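Reading the operator log dump above: CASE 1 behaved as intended; the lock stayed with the dummy-backup holder, and once passedSeconds (about 20.7) exceeded startingDeadlineSeconds: 20 the operator failed backup1 (the follow-up "failed to release the lock ... not the holder" error is consistent with the lease being the test's dummy one rather than one the operator acquired). backup2 is where things went wrong: the operator created the job xb-backup2, a later reconcile for the same backup then released the backup lock and failed with jobs.batch "xb-backup2" already exists, and backup2 apparently never reported Running before the wait loop used up its 360 retries. If the namespace is still reachable when this message is printed, the objects named in the log can be inspected directly; these are standard kubectl calls, not part of the test script, with the job name and namespace taken from the log above:

kubectl -n demand-backup-flow-control-25476 get lease pxc-some-name-backup-lock -o yaml   # who, if anyone, holds the backup lock now
kubectl -n demand-backup-flow-control-25476 get job xb-backup2 -o wide                    # was the backup job ever scheduled or completed
kubectl -n demand-backup-flow-control-25476 logs job/xb-backup2 --all-containers          # backup container output from the job's pod(s)
kubectl -n demand-backup-flow-control-25476 describe pxc-backup backup2                   # state, error, and events on the backup CR
kubectl -n demand-backup-flow-control-25476 get pxc-backup backup2 -o yaml | yq .status   # raw status block, same yq style as the test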