Log: /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/logs/scheduled-backup-5-7.log Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 + main + create_infra scheduled-backup-28123 + local ns=scheduled-backup-28123 + '[' -n pxc-operator ']' + kubectl get pxc --all-namespaces -o wide + grep -v NAMESPACE + xargs -L 1 sh -xc 'kubectl patch pxc -n $0 $1 --type=merge -p "{\"metadata\":{\"finalizers\":[]}}"' + kubectl patch pxc -n scheduled-backup-26503 scheduled-backup --type=merge -p '{"metadata":{"finalizers":[]}}' perconaxtradbcluster.pxc.percona.com/scheduled-backup patched (no change) + kubectl_bin delete pxc --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.tOjyPyg7by ++ mktemp + local LAST_ERR=/tmp/tmp.uwWTsH483c + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.tOjyPyg7by perconaxtradbcluster.pxc.percona.com "scheduled-backup" deleted from scheduled-backup-26503 namespace + cat /tmp/tmp.uwWTsH483c + rm /tmp/tmp.tOjyPyg7by /tmp/tmp.uwWTsH483c + return 0 + kubectl_bin delete pxc-backup --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.tWkPcKh4L9 ++ mktemp + local LAST_ERR=/tmp/tmp.sQQm6hlvJL + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-backup --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.tWkPcKh4L9 perconaxtradbclusterbackup.pxc.percona.com "cron-scheduled-backup-pvc-2025112861331-q6fav" deleted from scheduled-backup-26503 namespace + cat /tmp/tmp.sQQm6hlvJL + rm /tmp/tmp.tWkPcKh4L9 /tmp/tmp.sQQm6hlvJL + return 0 + kubectl_bin delete pxc-restore --all --all-namespaces ++ mktemp + local LAST_OUT=/tmp/tmp.f1QRCWy2M5 ++ mktemp + local LAST_ERR=/tmp/tmp.94QhS7YS93 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete pxc-restore --all --all-namespaces + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.f1QRCWy2M5 No resources found + cat /tmp/tmp.94QhS7YS93 + rm /tmp/tmp.f1QRCWy2M5 /tmp/tmp.94QhS7YS93 + return 0 + create_namespace pxc-operator + local namespace=pxc-operator + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// ++ tail -n1 + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ awk '{print $1}' ++ grep chaos-mesh.org + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete 
clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrole ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace ----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces pxc-operator' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin delete namespace pxc-operator + kubectl_bin get ns ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.zQ8T7Ncqvt + local LAST_OUT=/tmp/tmp.qvBsOor6uC ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.aoiS9omYlq + local exit_status=0 + local LAST_ERR=/tmp/tmp.wO5TcqjP7P + local exit_status=0 ++ seq 0 2 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get ns + for i in '$(seq 0 2)' + set +e + kubectl delete namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.zQ8T7Ncqvt + cat /tmp/tmp.aoiS9omYlq + rm /tmp/tmp.zQ8T7Ncqvt /tmp/tmp.aoiS9omYlq + return 0 namespace "scheduled-backup-26503" deleted + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.qvBsOor6uC namespace "pxc-operator" deleted + cat /tmp/tmp.wO5TcqjP7P + rm /tmp/tmp.qvBsOor6uC /tmp/tmp.wO5TcqjP7P + return 0 + wait_for_delete namespace/pxc-operator + local res=namespace/pxc-operator + echo -n 'waiting for namespace/pxc-operator to be deleted' waiting for namespace/pxc-operator to be deleted+ set +o xtrace Error from server (NotFound): namespaces "pxc-operator" not found + desc 'create namespace pxc-operator' + set +o xtrace ----------------------------------------------------------------------------------- create namespace pxc-operator ----------------------------------------------------------------------------------- + kubectl_bin create namespace pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.bZ8YvwFMXb ++ mktemp + local LAST_ERR=/tmp/tmp.yAf1Z20xq0 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace pxc-operator + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.bZ8YvwFMXb namespace/pxc-operator created + cat /tmp/tmp.yAf1Z20xq0 + rm /tmp/tmp.bZ8YvwFMXb /tmp/tmp.yAf1Z20xq0 + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.nLuuFJpCVr +++ mktemp ++ local LAST_ERR=/tmp/tmp.oBkup7zunT ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.nLuuFJpCVr ++ cat /tmp/tmp.oBkup7zunT ++ rm /tmp/tmp.nLuuFJpCVr /tmp/tmp.oBkup7zunT ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2076-09e03a97-6-cluster4 --namespace=pxc-operator ++ mktemp + local LAST_OUT=/tmp/tmp.hYLO5u6x2c ++ mktemp + local LAST_ERR=/tmp/tmp.MBYsejTRzT + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2076-09e03a97-6-cluster4 --namespace=pxc-operator + exit_status=0 + set -e 
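Nearly every kubectl call in this log goes through the kubectl_bin wrapper whose expansion is traced above: stdout and stderr are captured into mktemp files, the command is attempted up to three times, and the captured output is printed and the temp files removed afterwards. A minimal reconstruction of that wrapper, inferred from the traced statements (the real helper lives in the e2e-tests functions library and may differ in detail):

    kubectl_bin() {
        local LAST_OUT LAST_ERR exit_status=0
        LAST_OUT=$(mktemp)
        LAST_ERR=$(mktemp)
        for i in $(seq 0 2); do                  # up to 3 attempts, as seen in the trace
            set +e
            kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
            exit_status=$?
            set -e
            if [ "$exit_status" != 0 ]; then
                sleep 0                          # the trace shows no real backoff between attempts
                continue
            fi
            break
        done
        cat "$LAST_OUT"
        cat "$LAST_ERR"
        rm "$LAST_OUT" "$LAST_ERR"
        return "$exit_status"
    }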
+ '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.hYLO5u6x2c Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2076-09e03a97-6-cluster4" modified. + cat /tmp/tmp.MBYsejTRzT + rm /tmp/tmp.hYLO5u6x2c /tmp/tmp.MBYsejTRzT + return 0 + deploy_operator + desc 'start PXC operator' + set +o xtrace ----------------------------------------------------------------------------------- start PXC operator ----------------------------------------------------------------------------------- + kubectl_bin apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/deploy/crd.yaml ++ mktemp + local LAST_OUT=/tmp/tmp.2e2k8fKNw0 ++ mktemp + local LAST_ERR=/tmp/tmp.WzhuLn7k5Y + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/deploy/crd.yaml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.2e2k8fKNw0 customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterbackups.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusterrestores.pxc.percona.com serverside-applied customresourcedefinition.apiextensions.k8s.io/perconaxtradbclusters.pxc.percona.com serverside-applied + cat /tmp/tmp.WzhuLn7k5Y + rm /tmp/tmp.2e2k8fKNw0 /tmp/tmp.WzhuLn7k5Y + return 0 + '[' -n pxc-operator ']' + apply_rbac cw-rbac + local operator_namespace=pxc-operator + local rbac=cw-rbac + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/deploy/cw-rbac.yaml + sed -e 's^namespace: .*^namespace: pxc-operator^' + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.gyHZtpdHUq ++ mktemp + local LAST_ERR=/tmp/tmp.vq9fxA67np + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.gyHZtpdHUq clusterrole.rbac.authorization.k8s.io/percona-xtradb-cluster-operator unchanged serviceaccount/percona-xtradb-cluster-operator created clusterrolebinding.rbac.authorization.k8s.io/service-account-percona-xtradb-cluster-operator unchanged + cat /tmp/tmp.vq9fxA67np + rm /tmp/tmp.gyHZtpdHUq /tmp/tmp.vq9fxA67np + return 0 + sed -e 's^image: .*^image: perconalab/percona-xtradb-cluster-operator:PR-2076-09e03a97^' + sed -e 's^failureThreshold: .*^failureThreshold: 10^' + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/deploy/cw-operator.yaml + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "DISABLE_TELEMETRY").value) = "true"' - + yq eval '(select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "percona-xtradb-cluster-operator").env[] | select(.name == "LOG_LEVEL").value) = "VERBOSE"' - + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.OHDu0OUDIC ++ mktemp + local LAST_ERR=/tmp/tmp.DF7Qx2XdUj + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.OHDu0OUDIC deployment.apps/percona-xtradb-cluster-operator created service/percona-xtradb-cluster-operator created + cat /tmp/tmp.DF7Qx2XdUj + rm /tmp/tmp.OHDu0OUDIC /tmp/tmp.DF7Qx2XdUj + return 0 + sleep 10 + kubectl_bin wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s ++ mktemp + local LAST_OUT=/tmp/tmp.ib6NqwmytT ++ mktemp + 
local LAST_ERR=/tmp/tmp.ZTr9XsYSAv + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pods -l app.kubernetes.io/component=operator,app.kubernetes.io/instance=percona-xtradb-cluster-operator,app.kubernetes.io/name=percona-xtradb-cluster-operator --timeout=30s + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ib6NqwmytT pod/percona-xtradb-cluster-operator-86d86d5cd6-7smwq condition met + cat /tmp/tmp.ZTr9XsYSAv + rm /tmp/tmp.ib6NqwmytT /tmp/tmp.ZTr9XsYSAv + return 0 ++ get_operator_pod ++ local label_prefix=app.kubernetes.io/ +++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -n pxc-operator +++ grep -c percona-xtradb-cluster-operator ++ local check_label=1 ++ [[ 1 -eq 0 ]] ++ kubectl_bin get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator +++ mktemp ++ local LAST_OUT=/tmp/tmp.Wg99uH5xeL +++ mktemp ++ local LAST_ERR=/tmp/tmp.togxfWOAxs ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=app.kubernetes.io/name=percona-xtradb-cluster-operator -o 'jsonpath={.items[].metadata.name}' -n pxc-operator ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Wg99uH5xeL ++ cat /tmp/tmp.togxfWOAxs ++ rm /tmp/tmp.Wg99uH5xeL /tmp/tmp.togxfWOAxs ++ return 0 + wait_pod percona-xtradb-cluster-operator-86d86d5cd6-7smwq 480 pxc-operator + local pod=percona-xtradb-cluster-operator-86d86d5cd6-7smwq + local max_retry=480 + local ns=pxc-operator ++ echo percona-xtradb-cluster-operator-86d86d5cd6-7smwq ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/percona-xtradb-cluster-operator-86d86d5cd6-7smwq condition met waiting for pod/percona-xtradb-cluster-operator-86d86d5cd6-7smwq to become Ready.Ok + sleep 3 + create_namespace scheduled-backup-28123 + local namespace=scheduled-backup-28123 + local skip_clean_namespace= + [[ 1 == 1 ]] + [[ -z '' ]] + destroy_chaos_mesh ++ helm list --all-namespaces --filter chaos-mesh ++ tail -n1 ++ awk '-F ' '{print $2}' ++ sed s/NAMESPACE// + local chaos_mesh_ns= + '[' -n '' ']' ++ kubectl get MutatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete MutatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl get ValidatingWebhookConfiguration ++ grep validate-auth ++ awk '{print $1}' + timeout 30 kubectl delete ValidatingWebhookConfiguration error: resource(s) were provided, but no name was specified + : ++ kubectl api-resources ++ grep chaos-mesh ++ awk '{print $1}' ++ kubectl get crd ++ grep chaos-mesh.org ++ awk '{print $1}' + timeout 30 kubectl delete crd error: resource(s) were provided, but no name was specified + : ++ kubectl get clusterrolebinding ++ grep chaos-mesh ++ awk '{print $1}' + timeout 30 kubectl delete clusterrolebinding error: resource(s) were provided, but no name was specified + : ++ awk '{print $1}' ++ kubectl get clusterrole ++ grep chaos-mesh + timeout 30 kubectl delete clusterrole error: resource(s) were provided, but no name was specified + : + desc 'cleaned up all old namespaces' + set +o xtrace 
----------------------------------------------------------------------------------- cleaned up all old namespaces ----------------------------------------------------------------------------------- + kubectl_bin get ns + awk '{print$1}' + '[' -n '' ']' + desc 'cleaned up old namespaces scheduled-backup-28123' + xargs kubectl delete ns + set +o xtrace ----------------------------------------------------------------------------------- cleaned up old namespaces scheduled-backup-28123 ----------------------------------------------------------------------------------- + kubectl_bin delete namespace scheduled-backup-28123 + grep -E -v '^kube-|^default|Terminating|pxc-operator|openshift|^gke-|^gmp-|^NAME' ++ mktemp ++ mktemp + local LAST_OUT=/tmp/tmp.T0nf02XNb4 + local LAST_OUT=/tmp/tmp.f9c8igphJ7 ++ mktemp ++ mktemp + local LAST_ERR=/tmp/tmp.cRja7Jo2Ea + local exit_status=0 ++ seq 0 2 + local LAST_ERR=/tmp/tmp.sxBQ4IjcH0 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace scheduled-backup-28123 + for i in '$(seq 0 2)' + set +e + kubectl get ns + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace scheduled-backup-28123 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.f9c8igphJ7 + cat /tmp/tmp.sxBQ4IjcH0 + rm /tmp/tmp.f9c8igphJ7 /tmp/tmp.sxBQ4IjcH0 + return 0 error: resource(s) were provided, but no name was specified + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl delete namespace scheduled-backup-28123 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.T0nf02XNb4 + cat /tmp/tmp.cRja7Jo2Ea Error from server (NotFound): namespaces "scheduled-backup-28123" not found + rm /tmp/tmp.T0nf02XNb4 /tmp/tmp.cRja7Jo2Ea + return 1 + : + wait_for_delete namespace/scheduled-backup-28123 + local res=namespace/scheduled-backup-28123 + echo -n 'waiting for namespace/scheduled-backup-28123 to be deleted' waiting for namespace/scheduled-backup-28123 to be deleted+ set +o xtrace Error from server (NotFound): namespaces "scheduled-backup-28123" not found + desc 'create namespace scheduled-backup-28123' + set +o xtrace ----------------------------------------------------------------------------------- create namespace scheduled-backup-28123 ----------------------------------------------------------------------------------- + kubectl_bin create namespace scheduled-backup-28123 ++ mktemp + local LAST_OUT=/tmp/tmp.rJ0IrGYhMq ++ mktemp + local LAST_ERR=/tmp/tmp.4Gyd3PdDLR + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl create namespace scheduled-backup-28123 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.rJ0IrGYhMq namespace/scheduled-backup-28123 created + cat /tmp/tmp.4Gyd3PdDLR + rm /tmp/tmp.rJ0IrGYhMq /tmp/tmp.4Gyd3PdDLR + return 0 ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.QE2YHUiGG8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.bOQQYHF7C7 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.QE2YHUiGG8 ++ cat /tmp/tmp.bOQQYHF7C7 ++ rm /tmp/tmp.QE2YHUiGG8 /tmp/tmp.bOQQYHF7C7 ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2076-09e03a97-6-cluster4 --namespace=scheduled-backup-28123 ++ mktemp + local LAST_OUT=/tmp/tmp.9yxqDJCo9c ++ mktemp + local 
LAST_ERR=/tmp/tmp.1urKjY2eTh + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2076-09e03a97-6-cluster4 --namespace=scheduled-backup-28123 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.9yxqDJCo9c Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2076-09e03a97-6-cluster4" modified. + cat /tmp/tmp.1urKjY2eTh + rm /tmp/tmp.9yxqDJCo9c /tmp/tmp.1urKjY2eTh + return 0 + apply_secrets + desc 'create secrets for cloud storages' + set +o xtrace ----------------------------------------------------------------------------------- create secrets for cloud storages ----------------------------------------------------------------------------------- + '[' -z '' ']' + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/cloud-secret.yml ++ mktemp + local LAST_OUT=/tmp/tmp.F5swsb8zd4 ++ mktemp + local LAST_ERR=/tmp/tmp.9Rdch5a2vW + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/minio-secret.yml -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/cloud-secret.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.F5swsb8zd4 secret/minio-secret created secret/aws-s3-secret created secret/gcp-cs-secret created secret/azure-secret created + cat /tmp/tmp.9Rdch5a2vW + rm /tmp/tmp.F5swsb8zd4 /tmp/tmp.9Rdch5a2vW + return 0 + start_minio + deploy_helm scheduled-backup-28123 + helm repo add hashicorp https://helm.releases.hashicorp.com "hashicorp" already exists with the same configuration, skipping + helm repo add minio https://charts.min.io/ "minio" already exists with the same configuration, skipping + helm repo update Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the "minio" chart repository ...Successfully got an update from the "hashicorp" chart repository Update Complete. 
⎈Happy Helming!⎈ + local cert_secret= + local endpoint=http://minio-service:9000 + minio_args=(--version $MINIO_VER --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set "users[0].accessKey=some-access-key" --set "users[0].secretKey=some-secret-key" --set "users[0].policy=consoleAdmin" --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G) + local minio_args + [[ -n '' ]] + desc 'install Minio' + set +o xtrace ----------------------------------------------------------------------------------- install Minio ----------------------------------------------------------------------------------- + helm uninstall minio-service Error: uninstall: Release not loaded: minio-service: release: not found + : + retry 10 60 helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G minio/minio + local max=10 + local delay=60 + shift 2 + local n=1 + helm install minio-service --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/ --set securityContext.enabled=false --set persistence.size=2G minio/minio NAME: minio-service LAST DEPLOYED: Fri Nov 28 06:20:28 2025 NAMESPACE: scheduled-backup-28123 STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: MinIO can be accessed via port 9000 on the following DNS name from within your cluster: minio-service.scheduled-backup-28123.cluster.local To access MinIO from localhost, run the below commands: 1. export POD_NAME=$(kubectl get pods --namespace scheduled-backup-28123 -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") 2. kubectl port-forward $POD_NAME 9000 --namespace scheduled-backup-28123 Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace scheduled-backup-28123 minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace scheduled-backup-28123 minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 3. 
mc ls minio-service-local + sleep 30 ++ kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.xgFaQ196wg +++ mktemp ++ local LAST_ERR=/tmp/tmp.KLkbHj28gq ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.xgFaQ196wg ++ cat /tmp/tmp.KLkbHj28gq ++ rm /tmp/tmp.xgFaQ196wg /tmp/tmp.KLkbHj28gq ++ return 0 + MINIO_POD=minio-service-55fcc5d75f-f5bbt + wait_pod minio-service-55fcc5d75f-f5bbt + local pod=minio-service-55fcc5d75f-f5bbt + local max_retry=480 + local ns= ++ echo minio-service-55fcc5d75f-f5bbt ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/minio-service-55fcc5d75f-f5bbt condition met waiting for pod/minio-service-55fcc5d75f-f5bbt to become Ready.Ok + kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 --no-verify-ssl s3 mb s3://operator-testing ++ mktemp + local LAST_OUT=/tmp/tmp.0VqGkehuMj ++ mktemp + local LAST_ERR=/tmp/tmp.btzz5nTDT7 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 --no-verify-ssl s3 mb s3://operator-testing + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.0VqGkehuMj make_bucket: operator-testing pod "aws-cli" deleted from scheduled-backup-28123 namespace + cat /tmp/tmp.btzz5nTDT7 All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. If you don't see a command prompt, try pressing enter. 
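The MinIO chart above is installed through a retry wrapper (retry 10 60 helm install ...); only the first, successful attempt appears in the trace. A plausible shape for that helper, assuming it waits $delay seconds between attempts and gives up after $max tries (the give-up message and exact loop form are assumptions, only the variable setup is traced):

    retry() {
        local max=$1
        local delay=$2
        shift 2                        # remaining args are the command to run
        local n=1
        until "$@"; do
            if [ "$n" -ge "$max" ]; then
                echo "retry: giving up after $n attempts" >&2
                return 1
            fi
            sleep "$delay"
            n=$((n + 1))
        done
    }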
warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: unable to upgrade connection: container aws-cli not found in pod aws-cli_scheduled-backup-28123 + rm /tmp/tmp.0VqGkehuMj /tmp/tmp.btzz5nTDT7 + return 0 + cluster=scheduled-backup + cat - + kubectl_bin apply -f - ++ mktemp + local LAST_OUT=/tmp/tmp.2aOhH8fJmq ++ mktemp + local LAST_ERR=/tmp/tmp.de5ycEoIsT + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.2aOhH8fJmq priorityclass.scheduling.k8s.io/high-priority configured + cat /tmp/tmp.de5ycEoIsT + rm /tmp/tmp.2aOhH8fJmq /tmp/tmp.de5ycEoIsT + return 0 + spinup_pxc scheduled-backup /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/scheduled-backup/conf/scheduled-backup-init.yml + local cluster=scheduled-backup + local config=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/scheduled-backup/conf/scheduled-backup-init.yml + local size=3 + local sleep=10 + local secretsFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/secrets.yml + local pxcClientFile=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/client.yml + local port=3306 + desc 'create first PXC cluster' + set +o xtrace ----------------------------------------------------------------------------------- create first PXC cluster ----------------------------------------------------------------------------------- + kubectl_bin apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/secrets.yml ++ mktemp + local LAST_OUT=/tmp/tmp.BcwUwnocQq ++ mktemp + local LAST_ERR=/tmp/tmp.7SnKHvoGOt + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/secrets.yml + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.BcwUwnocQq secret/my-cluster-secrets created secret/some-name-ssl created secret/some-name-ssl-internal created + cat /tmp/tmp.7SnKHvoGOt + rm /tmp/tmp.BcwUwnocQq /tmp/tmp.7SnKHvoGOt + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/client.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/client.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/conf/client.yml ++ mktemp + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + local LAST_OUT=/tmp/tmp.LGNn4eDCH6 + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' ++ mktemp + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2076-09e03a97#' + local LAST_ERR=/tmp/tmp.DMSJB4sMST + local exit_status=0 + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed 
-e s~minio-service.#namespace~minio-service.scheduled-backup-28123~ + /usr/bin/sed -e 's#apply:.*#apply: Never#' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.LGNn4eDCH6 deployment.apps/pxc-client created + cat /tmp/tmp.DMSJB4sMST + rm /tmp/tmp.LGNn4eDCH6 /tmp/tmp.DMSJB4sMST + return 0 + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 5\.7 ]] + [[ scheduled-backup == \d\e\m\a\n\d\-\b\a\c\k\u\p ]] + [[ scheduled-backup == \d\e\m\a\n\d\-\b\a\c\k\u\p\-\c\l\o\u\d ]] + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/scheduled-backup/conf/scheduled-backup-init.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/scheduled-backup/conf/scheduled-backup-init.yml + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/scheduled-backup/conf/scheduled-backup-init.yml + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2076-09e03a97#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.scheduled-backup-28123~ + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + kubectl_bin apply -f - + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' ++ mktemp + local LAST_OUT=/tmp/tmp.NkJTJ9P5dm ++ mktemp + local LAST_ERR=/tmp/tmp.bTqg29DWOz + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.NkJTJ9P5dm perconaxtradbcluster.pxc.percona.com/scheduled-backup created + cat /tmp/tmp.bTqg29DWOz + rm /tmp/tmp.NkJTJ9P5dm /tmp/tmp.bTqg29DWOz + return 0 + desc 'check if all 3 Pods started' + set +o xtrace ----------------------------------------------------------------------------------- check if all 3 Pods started ----------------------------------------------------------------------------------- ++ get_proxy scheduled-backup ++ local target_cluster=scheduled-backup +++ kubectl_bin get pxc scheduled-backup -o 'jsonpath={.spec.haproxy.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.O5e1XqQv2y ++++ mktemp +++ local LAST_ERR=/tmp/tmp.yMY1xYtjHC +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc scheduled-backup -o 'jsonpath={.spec.haproxy.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.O5e1XqQv2y +++ cat /tmp/tmp.yMY1xYtjHC +++ rm /tmp/tmp.O5e1XqQv2y /tmp/tmp.yMY1xYtjHC +++ return 0 ++ [[ '' == \t\r\u\e ]] +++ kubectl_bin get pxc scheduled-backup -o 'jsonpath={.spec.proxysql.enabled}' ++++ mktemp +++ local LAST_OUT=/tmp/tmp.ZjiUMXXMdE ++++ mktemp +++ local LAST_ERR=/tmp/tmp.gu5NigfeD4 +++ local exit_status=0 ++++ seq 0 2 +++ for i in '$(seq 0 2)' +++ set +e +++ kubectl get pxc scheduled-backup -o 
'jsonpath={.spec.proxysql.enabled}' +++ exit_status=0 +++ set -e +++ '[' 0 '!=' 0 ']' +++ break +++ cat /tmp/tmp.ZjiUMXXMdE +++ cat /tmp/tmp.gu5NigfeD4 +++ rm /tmp/tmp.ZjiUMXXMdE /tmp/tmp.gu5NigfeD4 +++ return 0 ++ [[ true == \t\r\u\e ]] ++ echo scheduled-backup-proxysql ++ return + local proxy=scheduled-backup-proxysql + kubectl_bin wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n scheduled-backup-28123 ++ mktemp + local LAST_OUT=/tmp/tmp.bnw2AEeIDY ++ mktemp + local LAST_ERR=/tmp/tmp.RBsImCve12 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n scheduled-backup-28123 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n scheduled-backup-28123 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + for i in '$(seq 0 2)' + set +e + kubectl wait --for=condition=Ready pod -l app.kubernetes.io/instance=monitoring,app.kubernetes.io/managed-by=percona-xtradb-cluster-operator --timeout=300s -n scheduled-backup-28123 + exit_status=1 + set -e + '[' 1 '!=' 0 ']' + '[' 1 == 1 ']' + sleep 0 + cat /tmp/tmp.bnw2AEeIDY + cat /tmp/tmp.RBsImCve12 error: no matching resources found + rm /tmp/tmp.bnw2AEeIDY /tmp/tmp.RBsImCve12 + return 1 + true + wait_for_running scheduled-backup-proxysql 1 + local name=scheduled-backup-proxysql + let last_pod=0 + : + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 0 + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-proxysql-0 480 + local pod=scheduled-backup-proxysql-0 + local max_retry=480 + local ns= ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' ++ echo scheduled-backup-proxysql-0 + local container=proxysql + set +o xtrace pod/scheduled-backup-proxysql-0 condition met waiting for pod/scheduled-backup-proxysql-0 to become Ready.Ok + wait_for_running scheduled-backup-pxc 3 + local name=scheduled-backup-pxc + let last_pod=2 + local max_retry=480 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-0 480 + local pod=scheduled-backup-pxc-0 + local max_retry=480 + local ns= ++ echo scheduled-backup-pxc-0 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-0 condition met waiting for pod/scheduled-backup-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-1 480 + local pod=scheduled-backup-pxc-1 + local max_retry=480 + local ns= ++ echo scheduled-backup-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-1 condition met waiting for 
pod/scheduled-backup-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-2 480 + local pod=scheduled-backup-pxc-2 + local max_retry=480 + local ns= ++ echo scheduled-backup-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-2 condition met waiting for pod/scheduled-backup-pxc-2 to become Ready.Ok + sleep 10 ++ kubectl get pxc scheduled-backup -o 'jsonpath={.spec.secretsName}' + local secret_name=my-cluster-secrets ++ getSecretData my-cluster-secrets root ++ local secretName=my-cluster-secrets ++ local dataKey=root ++ kubectl_bin get secrets/my-cluster-secrets '--template={{.data.root}}' ++ base64 --decode +++ mktemp ++ local LAST_OUT=/tmp/tmp.XndChYBCBh +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZxsRFUqeag ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get secrets/my-cluster-secrets '--template={{.data.root}}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.XndChYBCBh ++ cat /tmp/tmp.ZxsRFUqeag ++ rm /tmp/tmp.XndChYBCBh /tmp/tmp.ZxsRFUqeag ++ return 0 + local root_pass=root_password + desc 'write data' + set +o xtrace ----------------------------------------------------------------------------------- write data ----------------------------------------------------------------------------------- + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 5\.7 ]] ++ is_keyring_plugin_in_use scheduled-backup ++ local cluster=scheduled-backup ++ kubectl_bin exec -it scheduled-backup-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ grep -E -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.pcrhWJz4g8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.k45OOSz6HQ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it scheduled-backup-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.pcrhWJz4g8 ++ cat /tmp/tmp.k45OOSz6HQ Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.pcrhWJz4g8 /tmp/tmp.k45OOSz6HQ ++ return 0 + [[ -n '' ]] + run_mysql 'CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' '-h scheduled-backup-proxysql -uroot -p'\''root_password'\'' -P3306' + local 'command=CREATE DATABASE IF NOT EXISTS myApp; use myApp; CREATE TABLE IF NOT EXISTS myApp (id int PRIMARY KEY) ;' + local 'uri=-h scheduled-backup-proxysql -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.YNdqhCqPYu +++ mktemp ++ local LAST_ERR=/tmp/tmp.ba8Cjrm52m ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.YNdqhCqPYu ++ cat /tmp/tmp.ba8Cjrm52m ++ rm /tmp/tmp.YNdqhCqPYu /tmp/tmp.ba8Cjrm52m ++ return 0 + client_pod=pxc-client-857d976497-975t6 + wait_pod pxc-client-857d976497-975t6 + local pod=pxc-client-857d976497-975t6 + local max_retry=480 + local ns= ++ echo pxc-client-857d976497-975t6 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-857d976497-975t6 condition met waiting for pod/pxc-client-857d976497-975t6 to become ReadyDefaulted container 
"pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + run_mysql 'INSERT myApp.myApp (id) VALUES (100500)' '-h scheduled-backup-proxysql -uroot -p'\''root_password'\'' -P3306' + local 'command=INSERT myApp.myApp (id) VALUES (100500)' + local 'uri=-h scheduled-backup-proxysql -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.4ClaSMCtzT +++ mktemp ++ local LAST_ERR=/tmp/tmp.iXvLcsx08E ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.4ClaSMCtzT ++ cat /tmp/tmp.iXvLcsx08E ++ rm /tmp/tmp.4ClaSMCtzT /tmp/tmp.iXvLcsx08E ++ return 0 + client_pod=pxc-client-857d976497-975t6 + wait_pod pxc-client-857d976497-975t6 + local pod=pxc-client-857d976497-975t6 + local max_retry=480 + local ns= ++ echo pxc-client-857d976497-975t6 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-857d976497-975t6 condition met waiting for pod/pxc-client-857d976497-975t6 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + sleep 30 ++ seq 0 2 + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h scheduled-backup-pxc-0.scheduled-backup-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h scheduled-backup-pxc-0.scheduled-backup-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/scheduled-backup/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 5\.7 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/scheduled-backup/compare/select-1-57.sql ]] + run_mysql 'SELECT * from myApp.myApp;' '-h scheduled-backup-pxc-0.scheduled-backup-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h scheduled-backup-pxc-0.scheduled-backup-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.9P0CiQXwFm +++ mktemp ++ local LAST_ERR=/tmp/tmp.AcMa1djNrZ ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.9P0CiQXwFm ++ cat /tmp/tmp.AcMa1djNrZ ++ rm /tmp/tmp.9P0CiQXwFm /tmp/tmp.AcMa1djNrZ ++ return 0 + client_pod=pxc-client-857d976497-975t6 + wait_pod pxc-client-857d976497-975t6 + local pod=pxc-client-857d976497-975t6 + local max_retry=480 + local ns= ++ echo pxc-client-857d976497-975t6 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-857d976497-975t6 condition met waiting for pod/pxc-client-857d976497-975t6 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.dLUqAPrfGo/select-1.sql ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/scheduled-backup/compare/select-1.sql /tmp/tmp.dLUqAPrfGo/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h scheduled-backup-pxc-1.scheduled-backup-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h scheduled-backup-pxc-1.scheduled-backup-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/scheduled-backup/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 5\.7 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/scheduled-backup/compare/select-1-57.sql ]] + run_mysql 'SELECT * from myApp.myApp;' '-h scheduled-backup-pxc-1.scheduled-backup-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h scheduled-backup-pxc-1.scheduled-backup-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.w0DrsDgnsp +++ mktemp ++ local LAST_ERR=/tmp/tmp.7leiS8XeFB ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.w0DrsDgnsp ++ cat /tmp/tmp.7leiS8XeFB ++ rm /tmp/tmp.w0DrsDgnsp /tmp/tmp.7leiS8XeFB ++ return 0 + client_pod=pxc-client-857d976497-975t6 + wait_pod pxc-client-857d976497-975t6 + local pod=pxc-client-857d976497-975t6 + local max_retry=480 + local ns= ++ echo pxc-client-857d976497-975t6 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container= + set +o xtrace pod/pxc-client-857d976497-975t6 condition met waiting for pod/pxc-client-857d976497-975t6 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.dLUqAPrfGo/select-1.sql ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/scheduled-backup/compare/select-1.sql /tmp/tmp.dLUqAPrfGo/select-1.sql + for i in '$(seq 0 $((size - 1)))' + compare_mysql_cmd select-1 'SELECT * from myApp.myApp;' '-h scheduled-backup-pxc-2.scheduled-backup-pxc -uroot -p'\''root_password'\'' -P3306' + local command_id=select-1 + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h scheduled-backup-pxc-2.scheduled-backup-pxc -uroot -p'\''root_password'\'' -P3306' + local postfix= + local expected_result=/mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/scheduled-backup/compare/select-1.sql + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.4 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 8\.0 ]] + [[ perconalab/percona-xtradb-cluster-operator:main-pxc5.7 =~ 5\.7 ]] + [[ -f /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/scheduled-backup/compare/select-1-57.sql ]] + run_mysql 'SELECT * from myApp.myApp;' '-h scheduled-backup-pxc-2.scheduled-backup-pxc -uroot -p'\''root_password'\'' -P3306' + local 'command=SELECT * from myApp.myApp;' + local 'uri=-h scheduled-backup-pxc-2.scheduled-backup-pxc -uroot -p'\''root_password'\'' -P3306' ++ get_client_pod ++ kubectl_bin get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' +++ mktemp ++ local LAST_OUT=/tmp/tmp.6yr2CbJuWA +++ mktemp ++ local LAST_ERR=/tmp/tmp.RQEdZa2PWm ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pods --selector=name=pxc-client -o 'jsonpath={.items[].metadata.name}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.6yr2CbJuWA ++ cat /tmp/tmp.RQEdZa2PWm ++ rm /tmp/tmp.6yr2CbJuWA /tmp/tmp.RQEdZa2PWm ++ return 0 + client_pod=pxc-client-857d976497-975t6 + wait_pod pxc-client-857d976497-975t6 + local pod=pxc-client-857d976497-975t6 + local max_retry=480 + local ns= ++ echo pxc-client-857d976497-975t6 ++ grep -E '^(pxc|proxysql)$' ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' + local container= + set +o xtrace pod/pxc-client-857d976497-975t6 condition met waiting for pod/pxc-client-857d976497-975t6 to become ReadyDefaulted container "pxc-client" out of: pxc-client, backup .Ok + set +o xtrace + '[' '!' 
-s /tmp/tmp.dLUqAPrfGo/select-1.sql ']' + [[ 0 -eq 0 ]] + diff -u /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/scheduled-backup/compare/select-1.sql /tmp/tmp.dLUqAPrfGo/select-1.sql ++ is_keyring_plugin_in_use scheduled-backup ++ local cluster=scheduled-backup ++ kubectl_bin exec -it scheduled-backup-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ grep -E -o 'early-plugin-load=keyring_\w+.so' +++ mktemp ++ local LAST_OUT=/tmp/tmp.vyxJjHVuLN +++ mktemp ++ local LAST_ERR=/tmp/tmp.AvW9JPKHtI ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl exec -it scheduled-backup-pxc-0 -c pxc -- bash -c 'cat /etc/mysql/node.cnf' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.vyxJjHVuLN ++ cat /tmp/tmp.AvW9JPKHtI Unable to use a TTY - input is not a terminal or the right kind of file ++ rm /tmp/tmp.vyxJjHVuLN /tmp/tmp.AvW9JPKHtI ++ return 0 + '[' '' ']' + sleep 20 + desc 'add backups schedule for pvc storage' + set +o xtrace ----------------------------------------------------------------------------------- add backups schedule for pvc storage ----------------------------------------------------------------------------------- ++ kubectl_bin config current-context +++ mktemp ++ local LAST_OUT=/tmp/tmp.d03FcCP1F8 +++ mktemp ++ local LAST_ERR=/tmp/tmp.Rfty23tZQN ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl config current-context ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.d03FcCP1F8 ++ cat /tmp/tmp.Rfty23tZQN ++ rm /tmp/tmp.d03FcCP1F8 /tmp/tmp.Rfty23tZQN ++ return 0 + kubectl_bin config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2076-09e03a97-6-cluster4 --namespace=scheduled-backup-28123 ++ mktemp + local LAST_OUT=/tmp/tmp.alWFLvuayL ++ mktemp + local LAST_ERR=/tmp/tmp.zDqVGVYrud + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl config set-context gke_cloud-dev-112233_us-central1-a_jen-pxc-2076-09e03a97-6-cluster4 --namespace=scheduled-backup-28123 + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.alWFLvuayL Context "gke_cloud-dev-112233_us-central1-a_jen-pxc-2076-09e03a97-6-cluster4" modified. 
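The scheduled-backup-pvc.yml applied next goes through the same cat_config pipeline used for the cluster spec earlier: the YAML is streamed through a chain of sed substitutions that pin every image to the build under test and fill in the MinIO endpoint for this namespace. Condensed from the traced commands ($config_file is a placeholder for the file being templated; only the most relevant substitutions are shown):

    # sketch of the cat_config image/endpoint templating seen in the trace
    cat "$config_file" \
        | sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' \
        | sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' \
        | sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2076-09e03a97#' \
        | sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup#' \
        | sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' \
        | sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' \
        | sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' \
        | sed -e 's~minio-service.#namespace~minio-service.scheduled-backup-28123~' \
        | kubectl apply -f -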
+ cat /tmp/tmp.zDqVGVYrud + rm /tmp/tmp.alWFLvuayL /tmp/tmp.zDqVGVYrud + return 0 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/scheduled-backup/conf/scheduled-backup-pvc.yml + '[' -z '' ']' + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/scheduled-backup/conf/scheduled-backup-pvc.yml + kubectl_bin apply -f - + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/scheduled-backup/conf/scheduled-backup-pvc.yml ++ mktemp + /usr/bin/sed -e 's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + local LAST_OUT=/tmp/tmp.2rGfxZQWv3 + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2076-09e03a97#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' ++ mktemp + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup#' + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.scheduled-backup-28123~ + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' + local LAST_ERR=/tmp/tmp.MYQUdGeiqs + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#apply:.*#apply: Never#' + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.2rGfxZQWv3 perconaxtradbcluster.pxc.percona.com/scheduled-backup configured + cat /tmp/tmp.MYQUdGeiqs + rm /tmp/tmp.2rGfxZQWv3 /tmp/tmp.MYQUdGeiqs + return 0 + label_node ++ kubectl_bin get nodes --no-headers=true ++ head -n1 +++ mktemp ++ awk '{print $1}' ++ grep -v master ++ local LAST_OUT=/tmp/tmp.o1WmTn7EZv +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZbakejGkF6 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get nodes --no-headers=true ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.o1WmTn7EZv ++ cat /tmp/tmp.ZbakejGkF6 ++ rm /tmp/tmp.o1WmTn7EZv /tmp/tmp.ZbakejGkF6 ++ return 0 + LABELED_NODE=gke-jen-pxc-2076-09e03a9-default-pool-26c8620f-471l + kubectl_bin label nodes gke-jen-pxc-2076-09e03a9-default-pool-26c8620f-471l backupWorker=True --overwrite ++ mktemp + local LAST_OUT=/tmp/tmp.3UwrFWsarM ++ mktemp + local LAST_ERR=/tmp/tmp.kk4RjBGnaR + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl label nodes gke-jen-pxc-2076-09e03a9-default-pool-26c8620f-471l backupWorker=True --overwrite + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.3UwrFWsarM node/gke-jen-pxc-2076-09e03a9-default-pool-26c8620f-471l not labeled + cat /tmp/tmp.kk4RjBGnaR + rm /tmp/tmp.3UwrFWsarM /tmp/tmp.kk4RjBGnaR + return 0 + sleep 61 + apply_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/scheduled-backup/conf/scheduled-backup-disable.yml + '[' -z '' ']' + kubectl_bin apply -f - + cat_config /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/scheduled-backup/conf/scheduled-backup-disable.yml + cat /mnt/jenkins/workspace/cloud-pxc-operator_PR-2076/e2e-tests/scheduled-backup/conf/scheduled-backup-disable.yml ++ mktemp + /usr/bin/sed -e 
's#image:.*-pxc\([0-9]*.[0-9]*\)\{0,1\}$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + local LAST_OUT=/tmp/tmp.88xt3TamOA + /usr/bin/sed -e 's#image:.*\/percona-xtradb-cluster:.*$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7#' + /usr/bin/sed -e 's#image:.*-pmm$#image: perconalab/pmm-client:dev-latest#' + /usr/bin/sed -e 's#apiVersion: pxc.percona.com/v.*$#apiVersion: pxc.percona.com/v1#' + /usr/bin/sed -e 's#image:.*-backup$#image: perconalab/percona-xtradb-cluster-operator:main-pxc5.7-backup#' + /usr/bin/sed -e 's#image:.*-logcollector$#image: perconalab/percona-xtradb-cluster-operator:main-logcollector#' + /usr/bin/sed -e s~minio-service.#namespace~minio-service.scheduled-backup-28123~ + /usr/bin/sed -e 's#image:.*-init$#image: perconalab/percona-xtradb-cluster-operator:PR-2076-09e03a97#' ++ mktemp + /usr/bin/sed -e 's#image:.*-proxysql$#image: perconalab/percona-xtradb-cluster-operator:main-proxysql#' + local LAST_ERR=/tmp/tmp.BLpLEClue3 + local exit_status=0 + /usr/bin/sed -e 's#image:.*-haproxy$#image: perconalab/percona-xtradb-cluster-operator:main-haproxy#' ++ seq 0 2 + /usr/bin/sed -e 's#apply:.*#apply: Never#' + for i in '$(seq 0 2)' + set +e + kubectl apply -f - + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.88xt3TamOA perconaxtradbcluster.pxc.percona.com/scheduled-backup configured + cat /tmp/tmp.BLpLEClue3 + rm /tmp/tmp.88xt3TamOA /tmp/tmp.BLpLEClue3 + return 0 + wait_all_backups ++ get_running_backups_amount ++ kubectl_bin get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ grep -vE ':Succeeded|:Failed' +++ mktemp ++ wc -l ++ local LAST_OUT=/tmp/tmp.Uh448gbk6w +++ mktemp ++ local LAST_ERR=/tmp/tmp.wNyerPsS85 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Uh448gbk6w ++ cat /tmp/tmp.wNyerPsS85 ++ rm /tmp/tmp.Uh448gbk6w /tmp/tmp.wNyerPsS85 ++ return 0 + [[ 1 -ne 0 ]] ++ get_failed_backups_amount ++ kubectl_bin get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ grep :Failed ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.IRhzPVeKN0 +++ mktemp ++ local LAST_ERR=/tmp/tmp.pZrDQNLgFp ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.IRhzPVeKN0 ++ cat /tmp/tmp.pZrDQNLgFp ++ rm /tmp/tmp.IRhzPVeKN0 /tmp/tmp.pZrDQNLgFp ++ return 0 + [[ 0 -eq 0 ]] + wait_for_running scheduled-backup-pxc 3 1 + local name=scheduled-backup-pxc + let last_pod=2 + local max_retry=1 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-0 1 + local pod=scheduled-backup-pxc-0 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-0 condition met waiting for 
pod/scheduled-backup-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-1 1 + local pod=scheduled-backup-pxc-1 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-1 condition met waiting for pod/scheduled-backup-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-2 1 + local pod=scheduled-backup-pxc-2 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-2 condition met waiting for pod/scheduled-backup-pxc-2 to become Ready.Ok + echo + kubectl_bin get pxc-backup ++ mktemp + local LAST_OUT=/tmp/tmp.1OdZpN3LNB ++ mktemp + local LAST_ERR=/tmp/tmp.CEygNhDUa2 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pxc-backup + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.1OdZpN3LNB NAME CLUSTER STORAGE DESTINATION STATUS COMPLETED AGE cron-scheduled-backup-pvc-202511286289-q6fav scheduled-backup pvc pvc/xb-cron-scheduled-backup-pvc-202511286289-q6fav Starting 19s + cat /tmp/tmp.CEygNhDUa2 + rm /tmp/tmp.1OdZpN3LNB /tmp/tmp.CEygNhDUa2 + return 0 + echo + kubectl_bin get pods ++ mktemp + local LAST_OUT=/tmp/tmp.kSbSzxJ1rU ++ mktemp + local LAST_ERR=/tmp/tmp.w8gDP7xNf3 + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pods + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.kSbSzxJ1rU NAME READY STATUS RESTARTS AGE minio-service-55fcc5d75f-f5bbt 1/1 Running 0 7m58s pxc-client-857d976497-975t6 2/2 Running 0 6m45s scheduled-backup-proxysql-0 3/3 Running 0 6m43s scheduled-backup-proxysql-1 3/3 Running 0 6m27s scheduled-backup-pxc-0 1/1 Running 0 6m43s scheduled-backup-pxc-1 1/1 Running 0 5m21s scheduled-backup-pxc-2 1/1 Running 0 4m7s xb-cron-scheduled-backup-pvc-202511286289-q6fav-28vwb 1/1 Running 0 20s + cat /tmp/tmp.w8gDP7xNf3 + rm /tmp/tmp.kSbSzxJ1rU /tmp/tmp.w8gDP7xNf3 + return 0 + sleep 20 ++ get_running_backups_amount ++ kubectl_bin get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ grep -vE ':Succeeded|:Failed' ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.JKuWjri1QB +++ mktemp ++ local LAST_ERR=/tmp/tmp.SRuJ70Vkct ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.JKuWjri1QB ++ cat /tmp/tmp.SRuJ70Vkct ++ rm /tmp/tmp.JKuWjri1QB /tmp/tmp.SRuJ70Vkct ++ return 0 + [[ 1 -ne 0 ]] ++ get_failed_backups_amount ++ kubectl_bin get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ grep :Failed ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.WZtz7QKWCA +++ mktemp ++ local LAST_ERR=/tmp/tmp.9PX65WHsSP ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.WZtz7QKWCA ++ cat /tmp/tmp.9PX65WHsSP ++ rm /tmp/tmp.WZtz7QKWCA /tmp/tmp.9PX65WHsSP ++ return 0 + [[ 0 -eq 0 ]] + 
wait_for_running scheduled-backup-pxc 3 1 + local name=scheduled-backup-pxc + let last_pod=2 + local max_retry=1 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-0 1 + local pod=scheduled-backup-pxc-0 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-0 condition met waiting for pod/scheduled-backup-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-1 1 + local pod=scheduled-backup-pxc-1 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-1 condition met waiting for pod/scheduled-backup-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-2 1 + local pod=scheduled-backup-pxc-2 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-2 condition met waiting for pod/scheduled-backup-pxc-2 to become Ready.Ok + echo + kubectl_bin get pxc-backup ++ mktemp + local LAST_OUT=/tmp/tmp.h5cnVf74gS ++ mktemp + local LAST_ERR=/tmp/tmp.at8YpoB9oZ + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pxc-backup + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.h5cnVf74gS NAME CLUSTER STORAGE DESTINATION STATUS COMPLETED AGE cron-scheduled-backup-pvc-202511286289-q6fav scheduled-backup pvc pvc/xb-cron-scheduled-backup-pvc-202511286289-q6fav Starting 57s + cat /tmp/tmp.at8YpoB9oZ + rm /tmp/tmp.h5cnVf74gS /tmp/tmp.at8YpoB9oZ + return 0 + echo + kubectl_bin get pods ++ mktemp + local LAST_OUT=/tmp/tmp.otW603EJ3l ++ mktemp + local LAST_ERR=/tmp/tmp.UnI5xJNrXd + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pods + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.otW603EJ3l NAME READY STATUS RESTARTS AGE minio-service-55fcc5d75f-f5bbt 1/1 Running 0 8m35s pxc-client-857d976497-975t6 2/2 Running 0 7m22s scheduled-backup-proxysql-0 3/3 Running 0 7m20s scheduled-backup-proxysql-1 3/3 Running 0 7m4s scheduled-backup-pxc-0 1/1 Running 0 7m20s scheduled-backup-pxc-1 1/1 Running 0 5m58s scheduled-backup-pxc-2 1/1 Running 0 4m44s xb-cron-scheduled-backup-pvc-202511286289-q6fav-28vwb 0/1 Error 0 57s xb-cron-scheduled-backup-pvc-202511286289-q6fav-swbfk 0/1 Error 0 26s + cat /tmp/tmp.UnI5xJNrXd + rm /tmp/tmp.otW603EJ3l /tmp/tmp.UnI5xJNrXd + return 0 + sleep 20 ++ get_running_backups_amount ++ kubectl_bin get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ wc -l ++ grep -vE ':Succeeded|:Failed' +++ mktemp ++ local LAST_OUT=/tmp/tmp.W4Qafmy19X +++ mktemp ++ local LAST_ERR=/tmp/tmp.mKwU1xrlOl ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.W4Qafmy19X ++ cat 
/tmp/tmp.mKwU1xrlOl ++ rm /tmp/tmp.W4Qafmy19X /tmp/tmp.mKwU1xrlOl ++ return 0 + [[ 1 -ne 0 ]] ++ get_failed_backups_amount ++ kubectl_bin get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ grep :Failed +++ mktemp ++ local LAST_OUT=/tmp/tmp.I2BNTEK66c +++ mktemp ++ local LAST_ERR=/tmp/tmp.Jrwxei0oxI ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ wc -l ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.I2BNTEK66c ++ cat /tmp/tmp.Jrwxei0oxI ++ rm /tmp/tmp.I2BNTEK66c /tmp/tmp.Jrwxei0oxI ++ return 0 + [[ 0 -eq 0 ]] + wait_for_running scheduled-backup-pxc 3 1 + local name=scheduled-backup-pxc + let last_pod=2 + local max_retry=1 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-0 1 + local pod=scheduled-backup-pxc-0 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-0 condition met waiting for pod/scheduled-backup-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-1 1 + local pod=scheduled-backup-pxc-1 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-1 condition met waiting for pod/scheduled-backup-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-2 1 + local pod=scheduled-backup-pxc-2 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-2 condition met waiting for pod/scheduled-backup-pxc-2 to become Ready.Ok + echo + kubectl_bin get pxc-backup ++ mktemp + local LAST_OUT=/tmp/tmp.YNFfLP8E7h ++ mktemp + local LAST_ERR=/tmp/tmp.jA1TDOg2tu + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pxc-backup + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.YNFfLP8E7h NAME CLUSTER STORAGE DESTINATION STATUS COMPLETED AGE cron-scheduled-backup-pvc-202511286289-q6fav scheduled-backup pvc pvc/xb-cron-scheduled-backup-pvc-202511286289-q6fav Starting 96s + cat /tmp/tmp.jA1TDOg2tu + rm /tmp/tmp.YNFfLP8E7h /tmp/tmp.jA1TDOg2tu + return 0 + echo + kubectl_bin get pods ++ mktemp + local LAST_OUT=/tmp/tmp.XG7pG3CWAo ++ mktemp + local LAST_ERR=/tmp/tmp.brNdxKZjEa + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pods + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.XG7pG3CWAo NAME READY STATUS RESTARTS AGE minio-service-55fcc5d75f-f5bbt 1/1 Running 0 9m15s pxc-client-857d976497-975t6 2/2 Running 0 8m2s scheduled-backup-proxysql-0 3/3 Running 0 8m scheduled-backup-proxysql-1 3/3 Running 0 7m44s scheduled-backup-pxc-0 1/1 Running 0 8m scheduled-backup-pxc-1 1/1 Running 0 6m38s scheduled-backup-pxc-2 1/1 Running 0 5m24s xb-cron-scheduled-backup-pvc-202511286289-q6fav-28vwb 0/1 Error 0 97s 
xb-cron-scheduled-backup-pvc-202511286289-q6fav-swbfk 0/1 Error 0 66s xb-cron-scheduled-backup-pvc-202511286289-q6fav-xs6mv 0/1 Error 0 33s + cat /tmp/tmp.brNdxKZjEa + rm /tmp/tmp.XG7pG3CWAo /tmp/tmp.brNdxKZjEa + return 0 + sleep 20 ++ get_running_backups_amount ++ kubectl_bin get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ grep -vE ':Succeeded|:Failed' ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.JJgEBKj0h5 +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZHuymKzaIf ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.JJgEBKj0h5 ++ cat /tmp/tmp.ZHuymKzaIf ++ rm /tmp/tmp.JJgEBKj0h5 /tmp/tmp.ZHuymKzaIf ++ return 0 + [[ 1 -ne 0 ]] ++ get_failed_backups_amount ++ kubectl_bin get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ grep :Failed ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.CtvoezGo9j +++ mktemp ++ local LAST_ERR=/tmp/tmp.tppbu3Lbj3 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.CtvoezGo9j ++ cat /tmp/tmp.tppbu3Lbj3 ++ rm /tmp/tmp.CtvoezGo9j /tmp/tmp.tppbu3Lbj3 ++ return 0 + [[ 0 -eq 0 ]] + wait_for_running scheduled-backup-pxc 3 1 + local name=scheduled-backup-pxc + let last_pod=2 + local max_retry=1 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-0 1 + local pod=scheduled-backup-pxc-0 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-0 condition met waiting for pod/scheduled-backup-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-1 1 + local pod=scheduled-backup-pxc-1 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-1 condition met waiting for pod/scheduled-backup-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-2 1 + local pod=scheduled-backup-pxc-2 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-2 condition met waiting for pod/scheduled-backup-pxc-2 to become Ready.Ok + echo + kubectl_bin get pxc-backup ++ mktemp + local LAST_OUT=/tmp/tmp.QYLC7SqF4U ++ mktemp + local LAST_ERR=/tmp/tmp.7TYRXc4qYB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pxc-backup + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.QYLC7SqF4U NAME CLUSTER STORAGE DESTINATION STATUS COMPLETED AGE cron-scheduled-backup-pvc-202511286289-q6fav scheduled-backup pvc 
pvc/xb-cron-scheduled-backup-pvc-202511286289-q6fav Starting 2m6s + cat /tmp/tmp.7TYRXc4qYB + rm /tmp/tmp.QYLC7SqF4U /tmp/tmp.7TYRXc4qYB + return 0 + echo + kubectl_bin get pods ++ mktemp + local LAST_OUT=/tmp/tmp.D5OHv7Skje ++ mktemp + local LAST_ERR=/tmp/tmp.QqhNnR8jWL + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pods + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.D5OHv7Skje NAME READY STATUS RESTARTS AGE minio-service-55fcc5d75f-f5bbt 1/1 Running 0 9m44s pxc-client-857d976497-975t6 2/2 Running 0 8m31s scheduled-backup-proxysql-0 3/3 Running 0 8m29s scheduled-backup-proxysql-1 3/3 Running 0 8m13s scheduled-backup-pxc-0 1/1 Running 0 8m29s scheduled-backup-pxc-1 1/1 Running 0 7m7s scheduled-backup-pxc-2 1/1 Running 0 5m53s xb-cron-scheduled-backup-pvc-202511286289-q6fav-28vwb 0/1 Error 0 2m6s xb-cron-scheduled-backup-pvc-202511286289-q6fav-sctwb 0/1 Init:0/1 0 8s xb-cron-scheduled-backup-pvc-202511286289-q6fav-swbfk 0/1 Error 0 95s xb-cron-scheduled-backup-pvc-202511286289-q6fav-xs6mv 0/1 Error 0 62s + cat /tmp/tmp.QqhNnR8jWL + rm /tmp/tmp.D5OHv7Skje /tmp/tmp.QqhNnR8jWL + return 0 + sleep 20 ++ get_running_backups_amount ++ kubectl_bin get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ grep -vE ':Succeeded|:Failed' ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.Rm3eek3jE2 +++ mktemp ++ local LAST_ERR=/tmp/tmp.TUHnRvSmC9 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Rm3eek3jE2 ++ cat /tmp/tmp.TUHnRvSmC9 ++ rm /tmp/tmp.Rm3eek3jE2 /tmp/tmp.TUHnRvSmC9 ++ return 0 + [[ 1 -ne 0 ]] ++ get_failed_backups_amount ++ kubectl_bin get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ grep :Failed ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.ZYrc1ouuvI +++ mktemp ++ local LAST_ERR=/tmp/tmp.ZqF4QpfmhM ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.ZYrc1ouuvI ++ cat /tmp/tmp.ZqF4QpfmhM ++ rm /tmp/tmp.ZYrc1ouuvI /tmp/tmp.ZqF4QpfmhM ++ return 0 + [[ 0 -eq 0 ]] + wait_for_running scheduled-backup-pxc 3 1 + local name=scheduled-backup-pxc + let last_pod=2 + local max_retry=1 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-0 1 + local pod=scheduled-backup-pxc-0 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-0 condition met waiting for pod/scheduled-backup-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-1 1 + local pod=scheduled-backup-pxc-1 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o 
xtrace pod/scheduled-backup-pxc-1 condition met waiting for pod/scheduled-backup-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-2 1 + local pod=scheduled-backup-pxc-2 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-2 condition met waiting for pod/scheduled-backup-pxc-2 to become Ready.Ok + echo + kubectl_bin get pxc-backup ++ mktemp + local LAST_OUT=/tmp/tmp.hPw3nKAQf8 ++ mktemp + local LAST_ERR=/tmp/tmp.fbCHQVyFVF + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pxc-backup + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.hPw3nKAQf8 NAME CLUSTER STORAGE DESTINATION STATUS COMPLETED AGE cron-scheduled-backup-pvc-202511286289-q6fav scheduled-backup pvc pvc/xb-cron-scheduled-backup-pvc-202511286289-q6fav Starting 2m40s + cat /tmp/tmp.fbCHQVyFVF + rm /tmp/tmp.hPw3nKAQf8 /tmp/tmp.fbCHQVyFVF + return 0 + echo + kubectl_bin get pods ++ mktemp + local LAST_OUT=/tmp/tmp.4unYHvOHxY ++ mktemp + local LAST_ERR=/tmp/tmp.oW9AKo0NRS + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pods + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.4unYHvOHxY NAME READY STATUS RESTARTS AGE minio-service-55fcc5d75f-f5bbt 1/1 Running 0 10m pxc-client-857d976497-975t6 2/2 Running 0 9m5s scheduled-backup-proxysql-0 3/3 Running 0 9m3s scheduled-backup-proxysql-1 3/3 Running 0 8m47s scheduled-backup-pxc-0 1/1 Running 0 9m3s scheduled-backup-pxc-1 1/1 Running 0 7m41s scheduled-backup-pxc-2 1/1 Running 0 6m27s xb-cron-scheduled-backup-pvc-202511286289-q6fav-28vwb 0/1 Error 0 2m40s xb-cron-scheduled-backup-pvc-202511286289-q6fav-sctwb 0/1 Error 0 42s xb-cron-scheduled-backup-pvc-202511286289-q6fav-swbfk 0/1 Error 0 2m9s xb-cron-scheduled-backup-pvc-202511286289-q6fav-xs6mv 0/1 Error 0 96s + cat /tmp/tmp.oW9AKo0NRS + rm /tmp/tmp.4unYHvOHxY /tmp/tmp.oW9AKo0NRS + return 0 + sleep 20 ++ get_running_backups_amount ++ kubectl_bin get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ grep -vE ':Succeeded|:Failed' ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.JKtXowLCvy +++ mktemp ++ local LAST_ERR=/tmp/tmp.d2vj5l9I92 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.JKtXowLCvy ++ cat /tmp/tmp.d2vj5l9I92 ++ rm /tmp/tmp.JKtXowLCvy /tmp/tmp.d2vj5l9I92 ++ return 0 + [[ 1 -ne 0 ]] ++ get_failed_backups_amount ++ kubectl_bin get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ grep :Failed ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.Tluy5qchID +++ mktemp ++ local LAST_ERR=/tmp/tmp.N5w5b7Oa7J ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.Tluy5qchID ++ cat /tmp/tmp.N5w5b7Oa7J ++ rm /tmp/tmp.Tluy5qchID /tmp/tmp.N5w5b7Oa7J ++ return 0 + [[ 0 -eq 0 ]] + wait_for_running scheduled-backup-pxc 3 1 + local name=scheduled-backup-pxc + let last_pod=2 + local max_retry=1 + desc 'wait 
for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-0 1 + local pod=scheduled-backup-pxc-0 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-0 condition met waiting for pod/scheduled-backup-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-1 1 + local pod=scheduled-backup-pxc-1 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-1 condition met waiting for pod/scheduled-backup-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-2 1 + local pod=scheduled-backup-pxc-2 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-2 condition met waiting for pod/scheduled-backup-pxc-2 to become Ready.Ok + echo + kubectl_bin get pxc-backup ++ mktemp + local LAST_OUT=/tmp/tmp.xN2PwrMaLx ++ mktemp + local LAST_ERR=/tmp/tmp.fXChiRGNta + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pxc-backup + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.xN2PwrMaLx NAME CLUSTER STORAGE DESTINATION STATUS COMPLETED AGE cron-scheduled-backup-pvc-202511286289-q6fav scheduled-backup pvc pvc/xb-cron-scheduled-backup-pvc-202511286289-q6fav Starting 3m9s + cat /tmp/tmp.fXChiRGNta + rm /tmp/tmp.xN2PwrMaLx /tmp/tmp.fXChiRGNta + return 0 + echo + kubectl_bin get pods ++ mktemp + local LAST_OUT=/tmp/tmp.ETW4ovFSmI ++ mktemp + local LAST_ERR=/tmp/tmp.Htkjis3eTW + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pods + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.ETW4ovFSmI NAME READY STATUS RESTARTS AGE minio-service-55fcc5d75f-f5bbt 1/1 Running 0 10m pxc-client-857d976497-975t6 2/2 Running 0 9m35s scheduled-backup-proxysql-0 3/3 Running 0 9m33s scheduled-backup-proxysql-1 3/3 Running 0 9m17s scheduled-backup-pxc-0 1/1 Running 0 9m33s scheduled-backup-pxc-1 1/1 Running 0 8m11s scheduled-backup-pxc-2 1/1 Running 0 6m57s xb-cron-scheduled-backup-pvc-202511286289-q6fav-28vwb 0/1 Error 0 3m10s xb-cron-scheduled-backup-pvc-202511286289-q6fav-sctwb 0/1 Error 0 72s xb-cron-scheduled-backup-pvc-202511286289-q6fav-swbfk 0/1 Error 0 2m39s xb-cron-scheduled-backup-pvc-202511286289-q6fav-xs6mv 0/1 Error 0 2m6s + cat /tmp/tmp.Htkjis3eTW + rm /tmp/tmp.ETW4ovFSmI /tmp/tmp.Htkjis3eTW + return 0 + sleep 20 ++ get_running_backups_amount ++ kubectl_bin get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ grep -vE ':Succeeded|:Failed' ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.AP0XYUACzu +++ mktemp ++ local LAST_ERR=/tmp/tmp.l0wPUgZogP ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat 
/tmp/tmp.AP0XYUACzu ++ cat /tmp/tmp.l0wPUgZogP ++ rm /tmp/tmp.AP0XYUACzu /tmp/tmp.l0wPUgZogP ++ return 0 + [[ 1 -ne 0 ]] ++ get_failed_backups_amount ++ kubectl_bin get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ grep :Failed ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.g41ahifZao +++ mktemp ++ local LAST_ERR=/tmp/tmp.esnFj0cIWN ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.g41ahifZao ++ cat /tmp/tmp.esnFj0cIWN ++ rm /tmp/tmp.g41ahifZao /tmp/tmp.esnFj0cIWN ++ return 0 + [[ 0 -eq 0 ]] + wait_for_running scheduled-backup-pxc 3 1 + local name=scheduled-backup-pxc + let last_pod=2 + local max_retry=1 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-0 1 + local pod=scheduled-backup-pxc-0 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-0 condition met waiting for pod/scheduled-backup-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-1 1 + local pod=scheduled-backup-pxc-1 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-1 condition met waiting for pod/scheduled-backup-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-2 1 + local pod=scheduled-backup-pxc-2 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-2 condition met waiting for pod/scheduled-backup-pxc-2 to become Ready.Ok + echo + kubectl_bin get pxc-backup ++ mktemp + local LAST_OUT=/tmp/tmp.q4zjokOMLT ++ mktemp + local LAST_ERR=/tmp/tmp.23yPUUGKwA + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pxc-backup + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.q4zjokOMLT NAME CLUSTER STORAGE DESTINATION STATUS COMPLETED AGE cron-scheduled-backup-pvc-202511286289-q6fav scheduled-backup pvc pvc/xb-cron-scheduled-backup-pvc-202511286289-q6fav Starting 3m43s + cat /tmp/tmp.23yPUUGKwA + rm /tmp/tmp.q4zjokOMLT /tmp/tmp.23yPUUGKwA + return 0 + echo + kubectl_bin get pods ++ mktemp + local LAST_OUT=/tmp/tmp.nnbj73rGAT ++ mktemp + local LAST_ERR=/tmp/tmp.YavLzbdc8t + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pods + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.nnbj73rGAT NAME READY STATUS RESTARTS AGE minio-service-55fcc5d75f-f5bbt 1/1 Running 0 11m pxc-client-857d976497-975t6 2/2 Running 0 10m scheduled-backup-proxysql-0 3/3 Running 0 10m scheduled-backup-proxysql-1 3/3 Running 0 9m51s scheduled-backup-pxc-0 1/1 Running 0 10m scheduled-backup-pxc-1 1/1 Running 0 8m45s scheduled-backup-pxc-2 1/1 Running 0 7m31s 
xb-cron-scheduled-backup-pvc-202511286289-q6fav-28vwb 0/1 Error 0 3m44s xb-cron-scheduled-backup-pvc-202511286289-q6fav-sctwb 0/1 Error 0 106s xb-cron-scheduled-backup-pvc-202511286289-q6fav-swbfk 0/1 Error 0 3m13s xb-cron-scheduled-backup-pvc-202511286289-q6fav-wnn6j 1/1 Running 0 12s xb-cron-scheduled-backup-pvc-202511286289-q6fav-xs6mv 0/1 Error 0 2m40s + cat /tmp/tmp.YavLzbdc8t + rm /tmp/tmp.nnbj73rGAT /tmp/tmp.YavLzbdc8t + return 0 + sleep 20 ++ get_running_backups_amount ++ kubectl_bin get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ grep -vE ':Succeeded|:Failed' ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.IUi2MztzHC +++ mktemp ++ local LAST_ERR=/tmp/tmp.yt36igsBJO ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.IUi2MztzHC ++ cat /tmp/tmp.yt36igsBJO ++ rm /tmp/tmp.IUi2MztzHC /tmp/tmp.yt36igsBJO ++ return 0 + [[ 1 -ne 0 ]] ++ get_failed_backups_amount ++ kubectl_bin get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ grep :Failed ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.oUKnUWsTcQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.k7jPSveME9 ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.oUKnUWsTcQ ++ cat /tmp/tmp.k7jPSveME9 ++ rm /tmp/tmp.oUKnUWsTcQ /tmp/tmp.k7jPSveME9 ++ return 0 + [[ 0 -eq 0 ]] + wait_for_running scheduled-backup-pxc 3 1 + local name=scheduled-backup-pxc + let last_pod=2 + local max_retry=1 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-0 1 + local pod=scheduled-backup-pxc-0 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-0 condition met waiting for pod/scheduled-backup-pxc-0 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-1 1 + local pod=scheduled-backup-pxc-1 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-1 condition met waiting for pod/scheduled-backup-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-2 1 + local pod=scheduled-backup-pxc-2 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-2 condition met waiting for pod/scheduled-backup-pxc-2 to become Ready.Ok + echo + kubectl_bin get pxc-backup ++ mktemp + local LAST_OUT=/tmp/tmp.mQcTItxb4M ++ mktemp + local LAST_ERR=/tmp/tmp.SDJpTM85vv + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pxc-backup + 
exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.mQcTItxb4M NAME CLUSTER STORAGE DESTINATION STATUS COMPLETED AGE cron-scheduled-backup-pvc-202511286289-q6fav scheduled-backup pvc pvc/xb-cron-scheduled-backup-pvc-202511286289-q6fav Starting 4m18s + cat /tmp/tmp.SDJpTM85vv + rm /tmp/tmp.mQcTItxb4M /tmp/tmp.SDJpTM85vv + return 0 + echo + kubectl_bin get pods ++ mktemp + local LAST_OUT=/tmp/tmp.8W72GTfNCE ++ mktemp + local LAST_ERR=/tmp/tmp.oWRbuaOYtF + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pods + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.8W72GTfNCE NAME READY STATUS RESTARTS AGE minio-service-55fcc5d75f-f5bbt 1/1 Running 0 11m pxc-client-857d976497-975t6 2/2 Running 0 10m scheduled-backup-proxysql-0 3/3 Running 0 10m scheduled-backup-proxysql-1 3/3 Running 0 10m scheduled-backup-pxc-0 1/1 Running 0 10m scheduled-backup-pxc-1 1/1 Running 0 9m20s scheduled-backup-pxc-2 1/1 Running 0 8m6s xb-cron-scheduled-backup-pvc-202511286289-q6fav-28vwb 0/1 Error 0 4m19s xb-cron-scheduled-backup-pvc-202511286289-q6fav-sctwb 0/1 Error 0 2m21s xb-cron-scheduled-backup-pvc-202511286289-q6fav-swbfk 0/1 Error 0 3m48s xb-cron-scheduled-backup-pvc-202511286289-q6fav-wnn6j 0/1 Error 0 47s xb-cron-scheduled-backup-pvc-202511286289-q6fav-xs6mv 0/1 Error 0 3m15s + cat /tmp/tmp.oWRbuaOYtF + rm /tmp/tmp.8W72GTfNCE /tmp/tmp.oWRbuaOYtF + return 0 + sleep 20 ++ get_running_backups_amount ++ kubectl_bin get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ grep -vE ':Succeeded|:Failed' ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.o00cwAhyb6 +++ mktemp ++ local LAST_ERR=/tmp/tmp.uGL9gBNWgI ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.o00cwAhyb6 ++ cat /tmp/tmp.uGL9gBNWgI ++ rm /tmp/tmp.o00cwAhyb6 /tmp/tmp.uGL9gBNWgI ++ return 0 + [[ 1 -ne 0 ]] ++ get_failed_backups_amount ++ kubectl_bin get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ wc -l ++ grep :Failed +++ mktemp ++ local LAST_OUT=/tmp/tmp.aNGruhK3wJ +++ mktemp ++ local LAST_ERR=/tmp/tmp.aXseORuaNd ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.aNGruhK3wJ ++ cat /tmp/tmp.aXseORuaNd ++ rm /tmp/tmp.aNGruhK3wJ /tmp/tmp.aXseORuaNd ++ return 0 + [[ 0 -eq 0 ]] + wait_for_running scheduled-backup-pxc 3 1 + local name=scheduled-backup-pxc + let last_pod=2 + local max_retry=1 + desc 'wait for running cluster' + set +o xtrace ----------------------------------------------------------------------------------- wait for running cluster ----------------------------------------------------------------------------------- ++ seq 0 2 + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-0 1 + local pod=scheduled-backup-pxc-0 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-0 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-0 condition met waiting for pod/scheduled-backup-pxc-0 to become Ready.Ok + for i in '$(seq 0 
$last_pod)' + wait_pod scheduled-backup-pxc-1 1 + local pod=scheduled-backup-pxc-1 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-1 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-1 condition met waiting for pod/scheduled-backup-pxc-1 to become Ready.Ok + for i in '$(seq 0 $last_pod)' + wait_pod scheduled-backup-pxc-2 1 + local pod=scheduled-backup-pxc-2 + local max_retry=1 + local ns= ++ echo scheduled-backup-pxc-2 ++ /usr/bin/sed -E 's/.*-(pxc|proxysql)-[0-9]/\1/' ++ grep -E '^(pxc|proxysql)$' + local container=pxc + set +o xtrace pod/scheduled-backup-pxc-2 condition met waiting for pod/scheduled-backup-pxc-2 to become Ready.Ok + echo + kubectl_bin get pxc-backup ++ mktemp + local LAST_OUT=/tmp/tmp.VMGodTulRw ++ mktemp + local LAST_ERR=/tmp/tmp.5Z3P6KGkrn + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pxc-backup + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.VMGodTulRw NAME CLUSTER STORAGE DESTINATION STATUS COMPLETED AGE cron-scheduled-backup-pvc-202511286289-q6fav scheduled-backup pvc pvc/xb-cron-scheduled-backup-pvc-202511286289-q6fav Starting 4m53s + cat /tmp/tmp.5Z3P6KGkrn + rm /tmp/tmp.VMGodTulRw /tmp/tmp.5Z3P6KGkrn + return 0 + echo + kubectl_bin get pods ++ mktemp + local LAST_OUT=/tmp/tmp.QWBvC1vVbW ++ mktemp + local LAST_ERR=/tmp/tmp.dqDTpLx5MB + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pods + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.QWBvC1vVbW NAME READY STATUS RESTARTS AGE minio-service-55fcc5d75f-f5bbt 1/1 Running 0 12m pxc-client-857d976497-975t6 2/2 Running 0 11m scheduled-backup-proxysql-0 3/3 Running 0 11m scheduled-backup-proxysql-1 3/3 Running 0 11m scheduled-backup-pxc-0 1/1 Running 0 11m scheduled-backup-pxc-1 1/1 Running 0 9m55s scheduled-backup-pxc-2 1/1 Running 0 8m41s xb-cron-scheduled-backup-pvc-202511286289-q6fav-28vwb 0/1 Error 0 4m54s xb-cron-scheduled-backup-pvc-202511286289-q6fav-sctwb 0/1 Error 0 2m56s xb-cron-scheduled-backup-pvc-202511286289-q6fav-swbfk 0/1 Error 0 4m23s xb-cron-scheduled-backup-pvc-202511286289-q6fav-wnn6j 0/1 Error 0 82s xb-cron-scheduled-backup-pvc-202511286289-q6fav-xs6mv 0/1 Error 0 3m50s + cat /tmp/tmp.dqDTpLx5MB + rm /tmp/tmp.QWBvC1vVbW /tmp/tmp.dqDTpLx5MB + return 0 + sleep 20 ++ get_running_backups_amount ++ kubectl_bin get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ grep -vE ':Succeeded|:Failed' ++ wc -l +++ mktemp ++ local LAST_OUT=/tmp/tmp.LMoVqFEORx +++ mktemp ++ local LAST_ERR=/tmp/tmp.vvOahQHL4l ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.LMoVqFEORx ++ cat /tmp/tmp.vvOahQHL4l ++ rm /tmp/tmp.LMoVqFEORx /tmp/tmp.vvOahQHL4l ++ return 0 + [[ 0 -ne 0 ]] ++ get_failed_backups_amount ++ kubectl_bin get pxc-backup '-o=jsonpath={range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ grep :Failed +++ mktemp ++ wc -l ++ local LAST_OUT=/tmp/tmp.lUTgw12IoQ +++ mktemp ++ local LAST_ERR=/tmp/tmp.ym0hOHtBDC ++ local exit_status=0 +++ seq 0 2 ++ for i in '$(seq 0 2)' ++ set +e ++ kubectl get pxc-backup '-o=jsonpath={range 
.items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' ++ exit_status=0 ++ set -e ++ '[' 0 '!=' 0 ']' ++ break ++ cat /tmp/tmp.lUTgw12IoQ ++ cat /tmp/tmp.ym0hOHtBDC ++ rm /tmp/tmp.lUTgw12IoQ /tmp/tmp.ym0hOHtBDC ++ return 0 + [[ 1 -gt 0 ]] + echo 'One or more backups have been failed!\n' One or more backups have been failed!\n + desc 'LIST OF BACKUPS' + set +o xtrace ----------------------------------------------------------------------------------- LIST OF BACKUPS ----------------------------------------------------------------------------------- + kubectl_bin get pxc-backup ++ mktemp + local LAST_OUT=/tmp/tmp.c9uwueE6ih ++ mktemp + local LAST_ERR=/tmp/tmp.6BWh0OH4Gt + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pxc-backup + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.c9uwueE6ih NAME CLUSTER STORAGE DESTINATION STATUS COMPLETED AGE cron-scheduled-backup-pvc-202511286289-q6fav scheduled-backup pvc pvc/xb-cron-scheduled-backup-pvc-202511286289-q6fav Failed 5m16s + cat /tmp/tmp.6BWh0OH4Gt + rm /tmp/tmp.c9uwueE6ih /tmp/tmp.6BWh0OH4Gt + return 0 + desc 'LIST OF PODS' + set +o xtrace ----------------------------------------------------------------------------------- LIST OF PODS ----------------------------------------------------------------------------------- + kubectl_bin get pods ++ mktemp + local LAST_OUT=/tmp/tmp.PAaBl1lUJy ++ mktemp + local LAST_ERR=/tmp/tmp.N5YzsLBrUj + local exit_status=0 ++ seq 0 2 + for i in '$(seq 0 2)' + set +e + kubectl get pods + exit_status=0 + set -e + '[' 0 '!=' 0 ']' + break + cat /tmp/tmp.PAaBl1lUJy NAME READY STATUS RESTARTS AGE minio-service-55fcc5d75f-f5bbt 1/1 Running 0 12m pxc-client-857d976497-975t6 2/2 Running 0 11m scheduled-backup-proxysql-0 3/3 Running 0 11m scheduled-backup-proxysql-1 3/3 Running 0 11m scheduled-backup-pxc-0 1/1 Running 0 11m scheduled-backup-pxc-1 1/1 Running 0 10m scheduled-backup-pxc-2 1/1 Running 0 9m4s xb-cron-scheduled-backup-pvc-202511286289-q6fav-28vwb 0/1 Error 0 5m17s xb-cron-scheduled-backup-pvc-202511286289-q6fav-sctwb 0/1 Error 0 3m19s xb-cron-scheduled-backup-pvc-202511286289-q6fav-swbfk 0/1 Error 0 4m46s xb-cron-scheduled-backup-pvc-202511286289-q6fav-wnn6j 0/1 Error 0 105s xb-cron-scheduled-backup-pvc-202511286289-q6fav-xs6mv 0/1 Error 0 4m13s + cat /tmp/tmp.N5YzsLBrUj + rm /tmp/tmp.PAaBl1lUJy /tmp/tmp.N5YzsLBrUj + return 0 + exit 1
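
For reference, the polling pattern that repeats throughout the trace above boils down to a handful of shell helpers. The sketch below is reconstructed from the xtrace output alone, not taken from the e2e-tests sources: the helper names (kubectl_bin, get_running_backups_amount, get_failed_backups_amount, wait_all_backups, wait_for_running, desc) do appear in the trace, but their bodies here are approximations, and details such as the redirections inside kubectl_bin, the retry back-off, and the stub bodies for desc and wait_for_running are assumptions.

# Sketch of the harness helpers exercised above, reconstructed from the trace.
kubectl_bin() {
    # Run kubectl up to three times, capturing stdout/stderr in temp files and
    # printing both afterwards. Redirections and the retry back-off are not
    # visible in xtrace output, so they are assumptions here.
    local LAST_OUT LAST_ERR exit_status=0
    LAST_OUT=$(mktemp)
    LAST_ERR=$(mktemp)
    for i in $(seq 0 2); do
        set +e
        kubectl "$@" >"$LAST_OUT" 2>"$LAST_ERR"
        exit_status=$?
        set -e
        if [ "$exit_status" != 0 ]; then
            sleep 1
        else
            break
        fi
    done
    cat "$LAST_OUT"
    cat "$LAST_ERR" >&2
    rm "$LAST_OUT" "$LAST_ERR"
    return "$exit_status"
}

get_running_backups_amount() {
    # Backups that are neither Succeeded nor Failed, i.e. still in flight.
    kubectl_bin get pxc-backup \
        -o=jsonpath='{range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' \
        | grep -vE ':Succeeded|:Failed' \
        | wc -l
}

get_failed_backups_amount() {
    # Backups that ended up in the Failed state.
    kubectl_bin get pxc-backup \
        -o=jsonpath='{range .items[*]}{.metadata.name}{":"}{.spec.storageName}{":"}{.status.state}{"\n"}{end}' \
        | grep ':Failed' \
        | wc -l
}

# Minimal stand-ins so the sketch is self-contained; the real helpers print a
# dashed banner and wait for the named pods to become Ready, as seen above.
desc() { printf -- '%s\n%s\n%s\n' '----' "$1" '----'; }
wait_for_running() { :; }

wait_all_backups() {
    # Poll every 20s while at least one backup is still running and none has
    # failed; after the loop, any Failed backup aborts the test with exit 1.
    while [[ $(get_running_backups_amount) -ne 0 ]] && [[ $(get_failed_backups_amount) -eq 0 ]]; do
        wait_for_running scheduled-backup-pxc 3 1
        echo
        kubectl_bin get pxc-backup
        echo
        kubectl_bin get pods
        sleep 20
    done
    if [[ $(get_failed_backups_amount) -gt 0 ]]; then
        echo 'One or more backups have been failed!\n'   # message exactly as it appears in the trace
        desc 'LIST OF BACKUPS'
        kubectl_bin get pxc-backup
        desc 'LIST OF PODS'
        kubectl_bin get pods
        exit 1
    fi
}

Read against this sketch, the failure mode above is straightforward: every xb-cron-scheduled-backup-pvc-202511286289-q6fav-* job pod exits with Error, the backup object stays in Starting until the operator marks it Failed, get_failed_backups_amount then returns a non-zero count, and wait_all_backups prints the backup and pod lists before terminating the test with exit 1.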