=== RUN kuttl harness.go:460: starting setup harness.go:258: running tests using configured kubeconfig. harness.go:281: Successful connection to cluster at: https://35.232.248.139 harness.go:366: running tests harness.go:77: going to run test suite with timeout of 180 seconds for each step harness.go:378: testsuite: e2e-tests/tests has 53 tests === RUN kuttl/harness === RUN kuttl/harness/demand-backup-retry === PAUSE kuttl/harness/demand-backup-retry === CONT kuttl/harness/demand-backup-retry logger.go:42: 12:18:46 | demand-backup-retry | Ignoring "conf": does not begin with a number followed by a dash. logger.go:42: 12:18:46 | demand-backup-retry | Creating namespace "kuttl-test-fine-meerkat" logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | starting test step 0-deploy-operator logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | running command: [sh -c set -o errexit set -o xtrace source ../../functions init_temp_dir # do this only in the first TestStep apply_s3_storage_secrets deploy_operator deploy_tls_cluster_secrets deploy_client if has_minio_storage; then deploy_minio "7G" fi] logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | + source ../../functions logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ realpath ../../.. logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | ++++ pwd logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | ++ test_name=demand-backup-retry logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/vars.sh logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup-retry logger.go:42: 12:18:46 | 
demand-backup-retry/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup-retry logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export GIT_BRANCH=PR-1238 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ GIT_BRANCH=PR-1238 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export VERSION=PR-1238-7677a7b6 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ VERSION=PR-1238-7677a7b6 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1238-7677a7b6 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1238-7677a7b6 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ [[ -z 8.4 ]] logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export MYSQL_VERSION=8.4 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ MYSQL_VERSION=8.4 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export IMAGE_BINLOG_SERVER=perconalab/percona-binlog-server:0.2.1 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ IMAGE_BINLOG_SERVER=perconalab/percona-binlog-server:0.2.1 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 12:18:46 | 
demand-backup-retry/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export CERT_MANAGER_VER=1.20.2 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ CERT_MANAGER_VER=1.20.2 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export MINIO_VER=5.4.0 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ MINIO_VER=5.4.0 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export VAULT_VER=0.16.1 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ VAULT_VER=0.16.1 logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | ++++ which gdate logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | ++++ which date logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ export date=/usr/sbin/date logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ date=/usr/sbin/date logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ oc get projects logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ : logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ kubectl get nodes logger.go:42: 12:18:46 | demand-backup-retry/0-deploy-operator | +++ grep '^minikube' logger.go:42: 12:18:47 | demand-backup-retry/0-deploy-operator | +++ which gsed logger.go:42: 12:18:47 | demand-backup-retry/0-deploy-operator | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 12:18:47 | demand-backup-retry/0-deploy-operator | +++ which sed logger.go:42: 12:18:47 | demand-backup-retry/0-deploy-operator | ++ sed=/usr/sbin/sed logger.go:42: 12:18:47 | demand-backup-retry/0-deploy-operator | + init_temp_dir logger.go:42: 12:18:47 | demand-backup-retry/0-deploy-operator | + rm -rf /tmp/kuttl/ps/demand-backup-retry logger.go:42: 12:18:47 | demand-backup-retry/0-deploy-operator | + mkdir -p /tmp/kuttl/ps/demand-backup-retry logger.go:42: 12:18:47 | demand-backup-retry/0-deploy-operator | + apply_s3_storage_secrets logger.go:42: 12:18:47 | demand-backup-retry/0-deploy-operator | + apply_minio_secret logger.go:42: 12:18:47 | demand-backup-retry/0-deploy-operator | + kubectl -n kuttl-test-fine-meerkat apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf/minio-secret.yml logger.go:42: 12:18:48 | demand-backup-retry/0-deploy-operator | secret/minio-secret created logger.go:42: 12:18:48 | demand-backup-retry/0-deploy-operator | + kubectl -n kuttl-test-fine-meerkat apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf/cloud-secret.yml logger.go:42: 12:18:49 | demand-backup-retry/0-deploy-operator | secret/aws-s3-secret created logger.go:42: 12:18:49 | demand-backup-retry/0-deploy-operator | secret/do-spaces-secret created logger.go:42: 12:18:50 | 
demand-backup-retry/0-deploy-operator | secret/gcp-cs-secret created logger.go:42: 12:18:50 | demand-backup-retry/0-deploy-operator | secret/azure-secret created logger.go:42: 12:18:50 | demand-backup-retry/0-deploy-operator | + deploy_operator logger.go:42: 12:18:50 | demand-backup-retry/0-deploy-operator | + destroy_operator logger.go:42: 12:18:50 | demand-backup-retry/0-deploy-operator | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0 logger.go:42: 12:18:50 | demand-backup-retry/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 12:18:50 | demand-backup-retry/0-deploy-operator | Error from server (NotFound): deployments.apps "percona-server-mysql-operator" not found logger.go:42: 12:18:50 | demand-backup-retry/0-deploy-operator | + true logger.go:42: 12:18:50 | demand-backup-retry/0-deploy-operator | + [[ -n ps-operator ]] logger.go:42: 12:18:50 | demand-backup-retry/0-deploy-operator | + kubectl delete namespace ps-operator --force --grace-period=0 logger.go:42: 12:18:51 | demand-backup-retry/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 12:18:51 | demand-backup-retry/0-deploy-operator | Error from server (NotFound): namespaces "ps-operator" not found logger.go:42: 12:18:51 | demand-backup-retry/0-deploy-operator | + true logger.go:42: 12:18:51 | demand-backup-retry/0-deploy-operator | + [[ -n ps-operator ]] logger.go:42: 12:18:51 | demand-backup-retry/0-deploy-operator | + create_namespace ps-operator logger.go:42: 12:18:51 | demand-backup-retry/0-deploy-operator | + local namespace=ps-operator logger.go:42: 12:18:51 | demand-backup-retry/0-deploy-operator | + [[ -n '' ]] logger.go:42: 12:18:51 | demand-backup-retry/0-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found logger.go:42: 12:18:51 | demand-backup-retry/0-deploy-operator | + kubectl wait --for=delete namespace ps-operator logger.go:42: 12:18:52 | demand-backup-retry/0-deploy-operator | + kubectl create namespace ps-operator logger.go:42: 12:18:52 | demand-backup-retry/0-deploy-operator | namespace/ps-operator created logger.go:42: 12:18:52 | demand-backup-retry/0-deploy-operator | + apply_crd logger.go:42: 12:18:52 | demand-backup-retry/0-deploy-operator | + kubectl -n ps-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy/crd.yaml logger.go:42: 12:18:53 | demand-backup-retry/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied logger.go:42: 12:18:54 | demand-backup-retry/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied logger.go:42: 12:18:54 | demand-backup-retry/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied logger.go:42: 12:18:54 | demand-backup-retry/0-deploy-operator | + apply_rbac logger.go:42: 12:18:54 | demand-backup-retry/0-deploy-operator | + local rbac_file logger.go:42: 12:18:54 | demand-backup-retry/0-deploy-operator | + '[' -n ps-operator ']' logger.go:42: 12:18:54 | demand-backup-retry/0-deploy-operator | + 
rbac_file=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy/cw-rbac.yaml logger.go:42: 12:18:54 | demand-backup-retry/0-deploy-operator | + kubectl -n ps-operator apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy/cw-rbac.yaml logger.go:42: 12:18:56 | demand-backup-retry/0-deploy-operator | serviceaccount/percona-server-mysql-operator created logger.go:42: 12:18:56 | demand-backup-retry/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created logger.go:42: 12:18:56 | demand-backup-retry/0-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged logger.go:42: 12:18:57 | demand-backup-retry/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created logger.go:42: 12:18:57 | demand-backup-retry/0-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged logger.go:42: 12:18:57 | demand-backup-retry/0-deploy-operator | + local operator_file logger.go:42: 12:18:57 | demand-backup-retry/0-deploy-operator | + '[' -n ps-operator ']' logger.go:42: 12:18:57 | demand-backup-retry/0-deploy-operator | + operator_file=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy/cw-operator.yaml logger.go:42: 12:18:57 | demand-backup-retry/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' logger.go:42: 12:18:57 | demand-backup-retry/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "VERBOSE"' logger.go:42: 12:18:57 | demand-backup-retry/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-1238-7677a7b6 logger.go:42: 12:18:57 | demand-backup-retry/0-deploy-operator | + kubectl -n ps-operator apply -f - logger.go:42: 12:18:57 | demand-backup-retry/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-1238-7677a7b6"' /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy/cw-operator.yaml logger.go:42: 12:18:59 | demand-backup-retry/0-deploy-operator | configmap/percona-server-mysql-operator-config created logger.go:42: 12:18:59 | demand-backup-retry/0-deploy-operator | deployment.apps/percona-server-mysql-operator created logger.go:42: 12:18:59 | demand-backup-retry/0-deploy-operator | + deploy_tls_cluster_secrets logger.go:42: 12:18:59 | demand-backup-retry/0-deploy-operator | + kubectl -n kuttl-test-fine-meerkat apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf/ssl-secret.yaml logger.go:42: 12:19:00 | demand-backup-retry/0-deploy-operator | secret/test-ssl created logger.go:42: 12:19:00 | demand-backup-retry/0-deploy-operator | + deploy_client logger.go:42: 12:19:00 | demand-backup-retry/0-deploy-operator | + kubectl -n kuttl-test-fine-meerkat apply -f - logger.go:42: 12:19:00 | demand-backup-retry/0-deploy-operator | ++ printf '.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 12:19:00 | demand-backup-retry/0-deploy-operator | + yq eval '.spec.containers[0].image="perconalab/percona-server-mysql-operator:main-psmysql8.4"' /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf/client.yaml logger.go:42: 12:19:01 | 
demand-backup-retry/0-deploy-operator | pod/mysql-client created logger.go:42: 12:19:01 | demand-backup-retry/0-deploy-operator | + has_minio_storage logger.go:42: 12:19:01 | demand-backup-retry/0-deploy-operator | + local name_suffix= logger.go:42: 12:19:01 | demand-backup-retry/0-deploy-operator | + local cr_name=demand-backup-retry logger.go:42: 12:19:01 | demand-backup-retry/0-deploy-operator | ++ get_test_cr demand-backup-retry logger.go:42: 12:19:01 | demand-backup-retry/0-deploy-operator | ++ local cr_name=demand-backup-retry logger.go:42: 12:19:01 | demand-backup-retry/0-deploy-operator | +++ detect_k8s_platform logger.go:42: 12:19:01 | demand-backup-retry/0-deploy-operator | +++ set +x logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | +++ echo gke logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | ++ local platform=gke logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | +++ get_platform_alias gke logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | +++ set +x logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | +++ echo gke logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | ++ platform_alias=gke logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | +++ get_storage_alias gke logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | +++ set +x logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | +++ echo default logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | ++ storage_alias=default logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | ++ local default_cr=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry.yaml logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | ++ local platform_cr=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry-gke.yaml logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | ++ local storage_cr=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry-default.yaml logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | ++ [[ -n /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry-default.yaml ]] logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | ++ [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry-default.yaml ]] logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | ++ [[ -n /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry-gke.yaml ]] logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | ++ [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry-gke.yaml ]] logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | ++ [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry.yaml ]] logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | ++ echo /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry.yaml logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | + local cr=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry.yaml logger.go:42: 12:19:02 | 
demand-backup-retry/0-deploy-operator | + [[ -z /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry.yaml ]] logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | ++ yq eval $'\n\t\t(.spec.backup.storages // {})\n\t\t| keys\n\t\t| map(select(test("^minio")))\n\t\t| length > 0\n\t' /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry.yaml logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | + local has_minio=true logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | + [[ true == true ]] logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | + echo 'MinIO enabled in /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry.yaml' logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | MinIO enabled in /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry.yaml logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | + return 0 logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | + deploy_minio 7G logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | + local storage=7G logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | + local access_key logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | + local secret_key logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | ++ kubectl -n kuttl-test-fine-meerkat get secret minio-secret -o 'jsonpath={.data.AWS_ACCESS_KEY_ID}' logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | ++ base64 -d logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | + access_key=some-access-key logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | ++ kubectl -n kuttl-test-fine-meerkat get secret minio-secret -o 'jsonpath={.data.AWS_SECRET_ACCESS_KEY}' logger.go:42: 12:19:02 | demand-backup-retry/0-deploy-operator | ++ base64 -d logger.go:42: 12:19:03 | demand-backup-retry/0-deploy-operator | + secret_key=some-secret-key logger.go:42: 12:19:03 | demand-backup-retry/0-deploy-operator | + helm uninstall -n kuttl-test-fine-meerkat minio-service logger.go:42: 12:19:03 | demand-backup-retry/0-deploy-operator | Error: uninstall: Release not loaded: minio-service: release: not found logger.go:42: 12:19:03 | demand-backup-retry/0-deploy-operator | + : logger.go:42: 12:19:03 | demand-backup-retry/0-deploy-operator | + helm repo remove minio logger.go:42: 12:19:03 | demand-backup-retry/0-deploy-operator | Error: no repositories configured logger.go:42: 12:19:03 | demand-backup-retry/0-deploy-operator | + : logger.go:42: 12:19:03 | demand-backup-retry/0-deploy-operator | + helm repo add minio https://charts.min.io/ logger.go:42: 12:19:04 | demand-backup-retry/0-deploy-operator | "minio" has been added to your repositories logger.go:42: 12:19:04 | demand-backup-retry/0-deploy-operator | +++ printf %q some-access-key logger.go:42: 12:19:04 | demand-backup-retry/0-deploy-operator | ++ printf %q some-access-key logger.go:42: 12:19:04 | demand-backup-retry/0-deploy-operator | +++ printf %q some-secret-key logger.go:42: 12:19:04 | demand-backup-retry/0-deploy-operator | ++ printf %q some-secret-key logger.go:42: 12:19:04 | demand-backup-retry/0-deploy-operator | + retry 10 60 helm install minio-service -n kuttl-test-fine-meerkat --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set 
rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=7G --set securityContext.enabled=false minio/minio logger.go:42: 12:19:04 | demand-backup-retry/0-deploy-operator | + local max=10 logger.go:42: 12:19:04 | demand-backup-retry/0-deploy-operator | + local delay=60 logger.go:42: 12:19:04 | demand-backup-retry/0-deploy-operator | + shift 2 logger.go:42: 12:19:04 | demand-backup-retry/0-deploy-operator | + local n=1 logger.go:42: 12:19:04 | demand-backup-retry/0-deploy-operator | + helm install minio-service -n kuttl-test-fine-meerkat --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=7G --set securityContext.enabled=false minio/minio logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | NAME: minio-service logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | LAST DEPLOYED: Tue May 5 12:19:05 2026 logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | NAMESPACE: kuttl-test-fine-meerkat logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | STATUS: deployed logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | REVISION: 1 logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | TEST SUITE: None logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | NOTES: logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | MinIO can be accessed via port 9000 on the following DNS name from within your cluster: logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | minio-service.kuttl-test-fine-meerkat.cluster.local logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | To access MinIO from localhost, run the below commands: logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | 1. export POD_NAME=$(kubectl get pods --namespace kuttl-test-fine-meerkat -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}") logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | 2. kubectl port-forward $POD_NAME 9000 --namespace kuttl-test-fine-meerkat logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/ logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | 2. 
export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace kuttl-test-fine-meerkat minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace kuttl-test-fine-meerkat minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000 logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | 3. mc ls minio-service-local logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | ++ kubectl -n kuttl-test-fine-meerkat get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}' logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | + MINIO_POD=minio-service-649c5b46f8-h7kf7 logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | + wait_pod minio-service-649c5b46f8-h7kf7 logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | + local pod=minio-service-649c5b46f8-h7kf7 logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | + local ns=kuttl-test-fine-meerkat logger.go:42: 12:19:40 | demand-backup-retry/0-deploy-operator | + set +o xtrace logger.go:42: 12:19:41 | demand-backup-retry/0-deploy-operator | minio-service-649c5b46f8-h7kf7true logger.go:42: 12:19:41 | demand-backup-retry/0-deploy-operator | + kubectl -n kuttl-test-fine-meerkat run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID='\''some-access-key'\'' AWS_SECRET_ACCESS_KEY='\''some-secret-key'\'' AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' logger.go:42: 12:19:44 | demand-backup-retry/0-deploy-operator | make_bucket: operator-testing logger.go:42: 12:19:44 | demand-backup-retry/0-deploy-operator | All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt. logger.go:42: 12:19:44 | demand-backup-retry/0-deploy-operator | If you don't see a command prompt, try pressing enter. logger.go:42: 12:19:47 | demand-backup-retry/0-deploy-operator | pod "aws-cli" deleted from kuttl-test-fine-meerkat namespace logger.go:42: 12:19:47 | demand-backup-retry/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 12:19:47 | demand-backup-retry/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 12:19:48 | demand-backup-retry/0-deploy-operator | INFO Found 1 resource(s). logger.go:42: 12:19:48 | demand-backup-retry/0-deploy-operator | NAME NAMESPACE COL0 logger.go:42: 12:19:48 | demand-backup-retry/0-deploy-operator | percona-server-mysql-operator ps-operator 1 logger.go:42: 12:19:48 | demand-backup-retry/0-deploy-operator | ASSERT PASS logger.go:42: 12:19:48 | demand-backup-retry/0-deploy-operator | test step completed 0-deploy-operator logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | starting test step 2-create-cluster logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | running command: [sh -c set -o errexit set -o xtrace source ../../functions get_cr | kubectl -n "${NAMESPACE}" apply -f -] logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | + source ../../functions logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ realpath ../../.. 
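# Step 0 above deployed the operator (CRDs, cw-rbac, cw-operator), the TLS and client
# fixtures, and, because the test CR defines a storage key matching ^minio, a
# standalone MinIO with an operator-testing bucket. To re-check that bucket by hand,
# one can reuse the harness's own aws-cli pod pattern (image, endpoint, bucket, and
# the some-access-key/some-secret-key test credentials are all taken from the log
# above; a sketch for manual verification, not part of the test run):
kubectl -n kuttl-test-fine-meerkat run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
    bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing'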
logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | ++++ pwd logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | ++ test_name=demand-backup-retry logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/vars.sh logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup-retry logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup-retry logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export GIT_BRANCH=PR-1238 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ GIT_BRANCH=PR-1238 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export VERSION=PR-1238-7677a7b6 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ VERSION=PR-1238-7677a7b6 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1238-7677a7b6 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1238-7677a7b6 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ [[ -z 8.4 ]] logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export MYSQL_VERSION=8.4 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ MYSQL_VERSION=8.4 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ 
IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export IMAGE_BINLOG_SERVER=perconalab/percona-binlog-server:0.2.1 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ IMAGE_BINLOG_SERVER=perconalab/percona-binlog-server:0.2.1 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export CERT_MANAGER_VER=1.20.2 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ CERT_MANAGER_VER=1.20.2 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export MINIO_VER=5.4.0 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ MINIO_VER=5.4.0 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export VAULT_VER=0.16.1 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ VAULT_VER=0.16.1 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | ++++ which gdate logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | ++++ 
which date logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ export date=/usr/sbin/date logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ date=/usr/sbin/date logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ oc get projects logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ : logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ kubectl get nodes logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ grep '^minikube' logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ which gsed logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | +++ which sed logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | ++ sed=/usr/sbin/sed logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | + get_cr logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | + local name_suffix= logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | + local image_mysql=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | + local image_backup=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | + local image_orchestrator=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | + local image_router=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | + local image_toolkit=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | + local image_haproxy=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | + local image_pmm_client=perconalab/pmm-client:3-dev-latest logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | + local image_binlog_server=perconalab/percona-binlog-server:0.2.1 logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | + local cr_file=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy/cr.yaml logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | + kubectl -n kuttl-test-fine-meerkat apply -f - logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | ++ detect_k8s_platform logger.go:42: 12:19:48 | demand-backup-retry/2-create-cluster | ++ set +x logger.go:42: 12:19:49 | demand-backup-retry/2-create-cluster | ++ echo gke logger.go:42: 12:19:49 | demand-backup-retry/2-create-cluster | + local platform=gke logger.go:42: 12:19:49 | demand-backup-retry/2-create-cluster | + local cr_name=demand-backup-retry logger.go:42: 12:19:49 | demand-backup-retry/2-create-cluster | + crs=('/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy/cr.yaml') logger.go:42: 12:19:49 | demand-backup-retry/2-create-cluster | + local crs logger.go:42: 12:19:49 | demand-backup-retry/2-create-cluster | ++ get_test_cr demand-backup-retry logger.go:42: 12:19:49 | demand-backup-retry/2-create-cluster | ++ local cr_name=demand-backup-retry logger.go:42: 12:19:49 | demand-backup-retry/2-create-cluster | +++ detect_k8s_platform logger.go:42: 12:19:49 | demand-backup-retry/2-create-cluster | +++ set +x logger.go:42: 12:19:50 | 
demand-backup-retry/2-create-cluster | +++ echo gke logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | ++ local platform=gke logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | +++ get_platform_alias gke logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | +++ set +x logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | +++ echo gke logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | ++ platform_alias=gke logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | +++ get_storage_alias gke logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | +++ set +x logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | +++ echo default logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | ++ storage_alias=default logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | ++ local default_cr=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry.yaml logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | ++ local platform_cr=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry-gke.yaml logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | ++ local storage_cr=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry-default.yaml logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | ++ [[ -n /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry-default.yaml ]] logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | ++ [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry-default.yaml ]] logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | ++ [[ -n /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry-gke.yaml ]] logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | ++ [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry-gke.yaml ]] logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | ++ [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry.yaml ]] logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | ++ echo /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry.yaml logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | + local test_cr=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry.yaml logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | + [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry.yaml ]] logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | + crs+=("$test_cr") logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | + [[ gke == minikube ]] logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | + yq eval-all $'\n\t\tselect(fileIndex == 0) as $base |\n\t\tselect(fileIndex == 1) as $test |\n\t\t($base * ($test // {})) |\n\t\t.spec.backup.storages = ($test.spec.backup.storages // $base.spec.backup.storages // {}) |\n\t\t.spec.mysql.clusterType = ($test.spec.mysql.clusterType // "async") |\n\t\t.metadata.name = "demand-backup-retry" 
|\n\t\t.spec.initContainer.image = "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" |\n\t\t.spec.secretsName = ($test.spec.secretsName // null) |\n\t\t.spec.sslSecretName = "test-ssl" |\n\t\t.spec.upgradeOptions.apply = "disabled" |\n\t\t.spec.mysql.gracePeriod = 30 |\n\t\t.spec.orchestrator.enabled = true |\n\t\t.spec.mysql.image = "perconalab/percona-server-mysql-operator:main-psmysql8.4" |\n\t\t.spec.backup.image = "perconalab/percona-server-mysql-operator:main-backup8.4" |\n\t\t.spec.orchestrator.image = "perconalab/percona-server-mysql-operator:main-orchestrator" |\n\t\t.spec.proxy.router.image = "perconalab/percona-server-mysql-operator:main-router8.4" |\n\t\t.spec.toolkit.image = "perconalab/percona-server-mysql-operator:main-toolkit" |\n\t\t.spec.proxy.haproxy.image = "perconalab/percona-server-mysql-operator:main-haproxy" |\n\t\t.spec.pmm.image = "perconalab/pmm-client:3-dev-latest" |\n\t\t.spec.backup.pitr.binlogServer.image="perconalab/percona-binlog-server:0.2.1" |\n\t\t(.. | select(tag == "!!str")) |= sub(""; "kuttl-test-fine-meerkat")\n\t' /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy/cr.yaml /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/demand-backup-retry.yaml logger.go:42: 12:19:50 | demand-backup-retry/2-create-cluster | + cat logger.go:42: 12:19:51 | demand-backup-retry/2-create-cluster | perconaservermysql.ps.percona.com/demand-backup-retry created logger.go:42: 12:23:52 | demand-backup-retry/2-create-cluster | test step completed 2-create-cluster logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | starting test step 3-populate-database logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | running command: [sh -c set -o errexit set -o xtrace source ../../functions cluster_name=$(get_cluster_name) # Configure table number and size and use it as a validation after restore yq eval " (.spec.template.spec.containers[].env[] | select(.name == \"MYSQL_HOST\")).value = \"${cluster_name}-haproxy\" | (.spec.template.spec.containers[].env[] | select(.name == \"MYSQL_PASSWORD\")).valueFrom.secretKeyRef.name = \"${cluster_name}-secrets\" | (.spec.template.spec.containers[].env[] | select(.name == \"MYSQL_DB\")).value = \"myDB\" | (.spec.template.spec.containers[].env[] | select(.name == \"TABLE_NUMBER\")).value = \"200\" | (.spec.template.spec.containers[].env[] | select(.name == \"TABLE_SIZE\")).value = \"40000\" | . " ../../conf/sysbench-oltp-write.yaml | kubectl apply -n "${NAMESPACE}" -f -] logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | + source ../../functions logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ realpath ../../.. 
logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | ++++ pwd logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | ++ test_name=demand-backup-retry logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/vars.sh logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup-retry logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup-retry logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export GIT_BRANCH=PR-1238 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ GIT_BRANCH=PR-1238 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export VERSION=PR-1238-7677a7b6 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ VERSION=PR-1238-7677a7b6 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1238-7677a7b6 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1238-7677a7b6 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ [[ -z 8.4 ]] logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export MYSQL_VERSION=8.4 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ MYSQL_VERSION=8.4 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 
12:23:52 | demand-backup-retry/3-populate-database | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export IMAGE_BINLOG_SERVER=perconalab/percona-binlog-server:0.2.1 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ IMAGE_BINLOG_SERVER=perconalab/percona-binlog-server:0.2.1 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export CERT_MANAGER_VER=1.20.2 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ CERT_MANAGER_VER=1.20.2 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export MINIO_VER=5.4.0 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ MINIO_VER=5.4.0 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export VAULT_VER=0.16.1 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ VAULT_VER=0.16.1 logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | ++++ which gdate logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | which: no gdate in 
(/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | ++++ which date logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ export date=/usr/sbin/date logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ date=/usr/sbin/date logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ oc get projects logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ : logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ kubectl get nodes logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ grep '^minikube' logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ which gsed logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | +++ which sed logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | ++ sed=/usr/sbin/sed logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | ++ get_cluster_name logger.go:42: 12:23:52 | demand-backup-retry/3-populate-database | ++ kubectl -n kuttl-test-fine-meerkat get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 12:23:53 | demand-backup-retry/3-populate-database | + cluster_name=demand-backup-retry logger.go:42: 12:23:53 | demand-backup-retry/3-populate-database | + yq eval $'\n (.spec.template.spec.containers[].env[] | select(.name == "MYSQL_HOST")).value = "demand-backup-retry-haproxy" |\n (.spec.template.spec.containers[].env[] | select(.name == "MYSQL_PASSWORD")).valueFrom.secretKeyRef.name = "demand-backup-retry-secrets" |\n (.spec.template.spec.containers[].env[] | select(.name == "MYSQL_DB")).value = "myDB" |\n (.spec.template.spec.containers[].env[] | select(.name == "TABLE_NUMBER")).value = "200" |\n (.spec.template.spec.containers[].env[] | select(.name == "TABLE_SIZE")).value = "40000" |\n .\n' ../../conf/sysbench-oltp-write.yaml logger.go:42: 12:23:53 | demand-backup-retry/3-populate-database | + kubectl apply -n kuttl-test-fine-meerkat -f - logger.go:42: 12:23:54 | demand-backup-retry/3-populate-database | job.batch/sysbench-data created logger.go:42: 12:26:13 | demand-backup-retry/3-populate-database | test step completed 3-populate-database logger.go:42: 12:26:13 | demand-backup-retry/4-save-database-data | starting test step 4-save-database-data logger.go:42: 12:26:13 | demand-backup-retry/4-save-database-data | running command: [sh -c set -o errexit source ../../functions cluster_name=$(get_cluster_name) data=$(run_mysql_query_file "conf/data-query.sql" "-h $(get_haproxy_svc $(get_cluster_name))") kubectl create configmap -n "${NAMESPACE}" 04-demand-backup-retry-data --from-literal=data="${data}"] logger.go:42: 12:26:13 | demand-backup-retry/4-save-database-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 12:26:13 | demand-backup-retry/4-save-database-data | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 12:26:16 
logger.go:42: 12:26:13 | demand-backup-retry/4-save-database-data | starting test step 4-save-database-data
logger.go:42: 12:26:13 | demand-backup-retry/4-save-database-data | running command: [sh -c set -o errexit source ../../functions cluster_name=$(get_cluster_name) data=$(run_mysql_query_file "conf/data-query.sql" "-h $(get_haproxy_svc $(get_cluster_name))") kubectl create configmap -n "${NAMESPACE}" 04-demand-backup-retry-data --from-literal=data="${data}"]
logger.go:42: 12:26:13 | demand-backup-retry/4-save-database-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 12:26:13 | demand-backup-retry/4-save-database-data | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 12:26:16 | demand-backup-retry/4-save-database-data | mysql-clienttrue
logger.go:42: 12:26:16 | demand-backup-retry/4-save-database-data | ++ cat conf/data-query.sql
logger.go:42: 12:26:16 | demand-backup-retry/4-save-database-data | ++ kubectl -n kuttl-test-fine-meerkat exec -i mysql-client -- bash -c 'mysql -sN -h demand-backup-retry-haproxy -uroot -p'\''TiU.^U(v-qf2^CYkVo7'\'' 2>&1'
logger.go:42: 12:26:16 | demand-backup-retry/4-save-database-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 12:26:18 | demand-backup-retry/4-save-database-data | configmap/04-demand-backup-retry-data created
logger.go:42: 12:26:18 | demand-backup-retry/4-save-database-data | test step completed 4-save-database-data
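run_mysql_query_file itself lives in e2e-tests/functions and is not printed here; reconstructed from the traced commands it is roughly the following. This is a sketch: the argument names and the secret lookup are assumptions, only the cat | kubectl exec | grep pipeline is taken from the trace.

run_mysql_query_file() {
    local sql_file=$1
    local client_args=$2    # e.g. "-h demand-backup-retry-haproxy"
    # Assumed: the root password comes from the cluster secret; key name "root" is a guess.
    local root_pass
    root_pass=$(kubectl -n "${NAMESPACE}" get secret "$(get_cluster_name)-secrets" \
        -o jsonpath='{.data.root}' | base64 -d)
    cat "${sql_file}" \
        | kubectl -n "${NAMESPACE}" exec -i mysql-client -- \
            bash -c "mysql -sN ${client_args} -uroot -p'${root_pass}' 2>&1" \
        | grep -v 'Using a password on the command line interface can be insecure.'
}

The 2>&1 plus the grep -v is what keeps mysql's insecure-password warning out of the captured output, so the configmap created from "${data}" holds only the query result.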
logger.go:42: 12:26:18 | demand-backup-retry/5-create-backup | starting test step 5-create-backup
logger.go:42: 12:26:18 | demand-backup-retry/5-create-backup | running command: [sh -c set -o errexit set -o xtrace source ../../functions # Gets first storage defined and run backup storage_name=$(kubectl get ps demand-backup-retry -n ${NAMESPACE} -o yaml \ | yq '(.spec.backup.storages // {}) | keys | .[0]') run_backup demand-backup-retry $storage_name ]
[... output of source ../../functions omitted: vars.sh exports and gdate/gsed/oc detection identical to 0-deploy-operator ...]
logger.go:42: 12:26:18 | demand-backup-retry/5-create-backup | ++ kubectl get ps demand-backup-retry -n kuttl-test-fine-meerkat -o yaml
logger.go:42: 12:26:18 | demand-backup-retry/5-create-backup | ++ yq '(.spec.backup.storages // {}) | keys | .[0]'
logger.go:42: 12:26:19 | demand-backup-retry/5-create-backup | + storage_name=minio
logger.go:42: 12:26:19 | demand-backup-retry/5-create-backup | + run_backup demand-backup-retry minio
logger.go:42: 12:26:19 | demand-backup-retry/5-create-backup | + local backup_name=demand-backup-retry
logger.go:42: 12:26:19 | demand-backup-retry/5-create-backup | + local storage_name=minio
logger.go:42: 12:26:19 | demand-backup-retry/5-create-backup | ++ get_cluster_name
logger.go:42: 12:26:19 | demand-backup-retry/5-create-backup | ++ kubectl -n kuttl-test-fine-meerkat get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 12:26:19 | demand-backup-retry/5-create-backup | + local cluster_name=demand-backup-retry
logger.go:42: 12:26:19 | demand-backup-retry/5-create-backup | + local prefix=
logger.go:42: 12:26:19 | demand-backup-retry/5-create-backup | + local backup_template=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy/backup/backup.yaml
logger.go:42: 12:26:19 | demand-backup-retry/5-create-backup | + test_backup_template=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/backup/backup.yaml
logger.go:42: 12:26:19 | demand-backup-retry/5-create-backup | + [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf/backup/backup.yaml ]]
logger.go:42: 12:26:19 | demand-backup-retry/5-create-backup | + echo 'Running backup demand-backup-retry using storage minio'
logger.go:42: 12:26:19 | demand-backup-retry/5-create-backup | Running backup demand-backup-retry using storage minio
logger.go:42: 12:26:19 | demand-backup-retry/5-create-backup | + yq eval $'\n\t\t.metadata.name = "demand-backup-retry" |\n\t\t.spec.storageName = "minio" |\n\t\t.spec.clusterName = "demand-backup-retry"\n\t' /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy/backup/backup.yaml
logger.go:42: 12:26:19 | demand-backup-retry/5-create-backup | + kubectl apply -n kuttl-test-fine-meerkat -f -
logger.go:42: 12:26:20 | demand-backup-retry/5-create-backup | perconaservermysqlbackup.ps.percona.com/demand-backup-retry created
logger.go:42: 12:26:23 | demand-backup-retry/5-create-backup | test step completed 5-create-backup
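From the trace, run_backup is essentially a yq-plus-apply wrapper over deploy/backup/backup.yaml. A minimal sketch consistent with the traced execution (the [[ -f ... ]] line above shows it also checks for a per-test conf/backup/backup.yaml override; the exact helper in e2e-tests/functions may differ):

run_backup() {
    local backup_name=$1
    local storage_name=$2
    local cluster_name
    cluster_name=$(get_cluster_name)
    local backup_template="${DEPLOY_DIR}/backup/backup.yaml"
    # Use the per-test template instead, if the test ships one.
    local test_backup_template="${TEST_CONFIG_DIR}/backup/backup.yaml"
    [[ -f ${test_backup_template} ]] && backup_template=${test_backup_template}
    echo "Running backup ${backup_name} using storage ${storage_name}"
    yq eval "
        .metadata.name = \"${backup_name}\" |
        .spec.storageName = \"${storage_name}\" |
        .spec.clusterName = \"${cluster_name}\"
    " "${backup_template}" | kubectl apply -n "${NAMESPACE}" -f -
}

Selecting the storage with yq '(.spec.backup.storages // {}) | keys | .[0]' keeps the step storage-agnostic: whatever storage the cluster CR defines first (minio here) is the one backed up to.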
logger.go:42: 12:26:23 | demand-backup-retry/6-verify-uploaded-files | starting test step 6-verify-uploaded-files
logger.go:42: 12:26:23 | demand-backup-retry/6-verify-uploaded-files | running command: [sh -c set -o errexit set -o xtrace source ../../functions backup="demand-backup-retry" eval "$( kubectl get ps-backup "$backup" -n "$NAMESPACE" -o json \ | jq -r ' .status.storage.type as $t | (if $t then .status.storage[$t] else {} end) as $s | "storage_type=\($t // "")", "bucket=\($s.bucket // $s.container // "")", "secret_name=\($s.credentialsSecret // "")", "endpoint=\($s.endpointUrl // "")", "region=\($s.region // "")", "destination=\(.status.destination // "")" ' )" prefix="${destination#*://}" prefix="${prefix#*/}" if [[ -z "$destination" ]]; then echo "Backup $backup is missing destination" exit 1 fi backup_exists=false for i in {1..5}; do count="$(count_files_on_storage "$storage_type" "$bucket" "$prefix" "$secret_name" "$region" "$endpoint")" if [[ $count -gt 0 ]]; then echo "Backup files found for: $backup" backup_exists=true break fi echo "Waiting for backup prefix to be available ($i/5): $backup" sleep 5 done if ! $backup_exists; then echo "Backup isn't present in storage: $backup" exit 1 fi]
[... output of source ../../functions omitted: vars.sh exports and gdate/gsed/oc detection identical to 0-deploy-operator ...]
logger.go:42: 12:26:23 | demand-backup-retry/6-verify-uploaded-files | + backup=demand-backup-retry
logger.go:42: 12:26:23 | demand-backup-retry/6-verify-uploaded-files | ++ kubectl get ps-backup demand-backup-retry -n kuttl-test-fine-meerkat -o json
logger.go:42: 12:26:23 | demand-backup-retry/6-verify-uploaded-files | ++ jq -r $'\n .status.storage.type as $t |\n (if $t then .status.storage[$t] else {} end) as $s |\n "storage_type=\\($t // "")",\n "bucket=\\($s.bucket // $s.container // "")",\n "secret_name=\\($s.credentialsSecret // "")",\n "endpoint=\\($s.endpointUrl // "")",\n "region=\\($s.region // "")",\n "destination=\\(.status.destination // "")"\n '
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | + eval $'storage_type=s3\nbucket=operator-testing\nsecret_name=minio-secret\nendpoint=http://minio-service.kuttl-test-fine-meerkat:9000\nregion=us-east-1\ndestination=s3://operator-testing/demand-backup-retry-2026-05-05-12:26:21-full'
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | ++ storage_type=s3
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | ++ bucket=operator-testing
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | ++ secret_name=minio-secret
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | ++ endpoint=http://minio-service.kuttl-test-fine-meerkat:9000
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | ++ region=us-east-1
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | ++ destination=s3://operator-testing/demand-backup-retry-2026-05-05-12:26:21-full
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | + prefix=operator-testing/demand-backup-retry-2026-05-05-12:26:21-full
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | + prefix=demand-backup-retry-2026-05-05-12:26:21-full
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | + [[ -z s3://operator-testing/demand-backup-retry-2026-05-05-12:26:21-full ]]
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | + backup_exists=false
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | + for i in {1..5}
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | ++ count_files_on_storage s3 operator-testing demand-backup-retry-2026-05-05-12:26:21-full minio-secret us-east-1 http://minio-service.kuttl-test-fine-meerkat:9000
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | ++ local storage_type=s3
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | ++ local bucket=operator-testing
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | ++ local prefix=demand-backup-retry-2026-05-05-12:26:21-full
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | ++ local secret_name=minio-secret
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | ++ local region=us-east-1
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | ++ local endpoint=http://minio-service.kuttl-test-fine-meerkat:9000
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | ++ prefix=demand-backup-retry-2026-05-05-12:26:21-full/
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | ++ case "$storage_type" in
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | ++ s3_count_files operator-testing demand-backup-retry-2026-05-05-12:26:21-full/ minio-secret us-east-1 http://minio-service.kuttl-test-fine-meerkat:9000
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | +++ run_s3_command minio-secret us-east-1 http://minio-service.kuttl-test-fine-meerkat:9000 s3 ls s3://operator-testing/demand-backup-retry-2026-05-05-12:26:21-full/ --recursive --summarize
logger.go:42: 12:26:24 | demand-backup-retry/6-verify-uploaded-files | +++ awk '/Total Objects/ {print $3}'
logger.go:42: 12:26:32 | demand-backup-retry/6-verify-uploaded-files | ++ local output=
logger.go:42: 12:26:32 | demand-backup-retry/6-verify-uploaded-files | ++ echo ''
logger.go:42: 12:26:32 | demand-backup-retry/6-verify-uploaded-files | + count=
logger.go:42: 12:26:32 | demand-backup-retry/6-verify-uploaded-files | + [[ '' -gt 0 ]]
logger.go:42: 12:26:32 | demand-backup-retry/6-verify-uploaded-files | + echo 'Waiting for backup prefix to be available (1/5): demand-backup-retry'
logger.go:42: 12:26:32 | demand-backup-retry/6-verify-uploaded-files | Waiting for backup prefix to be available (1/5): demand-backup-retry
logger.go:42: 12:26:32 | demand-backup-retry/6-verify-uploaded-files | + sleep 5
logger.go:42: 12:26:37 | demand-backup-retry/6-verify-uploaded-files | + for i in {1..5}
logger.go:42: 12:26:37 | demand-backup-retry/6-verify-uploaded-files | ++ count_files_on_storage s3 operator-testing demand-backup-retry-2026-05-05-12:26:21-full minio-secret us-east-1 http://minio-service.kuttl-test-fine-meerkat:9000
[... second count_files_on_storage invocation traced identically to the first; omitted ...]
logger.go:42: 12:26:42 | demand-backup-retry/6-verify-uploaded-files | ++ local output=397
logger.go:42: 12:26:42 | demand-backup-retry/6-verify-uploaded-files | ++ echo 397
logger.go:42: 12:26:42 | demand-backup-retry/6-verify-uploaded-files | + count=397
logger.go:42: 12:26:42 | demand-backup-retry/6-verify-uploaded-files | + [[ 397 -gt 0 ]]
logger.go:42: 12:26:42 | demand-backup-retry/6-verify-uploaded-files | + echo 'Backup files found for: demand-backup-retry'
logger.go:42: 12:26:42 | demand-backup-retry/6-verify-uploaded-files | Backup files found for: demand-backup-retry
logger.go:42: 12:26:42 | demand-backup-retry/6-verify-uploaded-files | + backup_exists=true
logger.go:42: 12:26:42 | demand-backup-retry/6-verify-uploaded-files | + break
logger.go:42: 12:26:42 | demand-backup-retry/6-verify-uploaded-files | + true
logger.go:42: 12:26:42 | demand-backup-retry/6-verify-uploaded-files | test step completed 6-verify-uploaded-files
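The helper pair traced above can be reconstructed roughly as follows. This is a sketch based only on the traced calls; run_s3_command is assumed to wrap the aws CLI with credentials taken from the named secret, which the trace does not show.

count_files_on_storage() {
    local storage_type=$1 bucket=$2 prefix=$3 secret_name=$4 region=$5 endpoint=$6
    prefix="${prefix%/}/"    # normalize to a trailing slash, as the trace shows
    case "$storage_type" in
        s3) s3_count_files "$bucket" "$prefix" "$secret_name" "$region" "$endpoint" ;;
        # azure) ...   # step 97 later branches on azure, so a second arm presumably exists
    esac
}

s3_count_files() {
    local bucket=$1 prefix=$2 secret_name=$3 region=$4 endpoint=$5
    local output
    output=$(run_s3_command "${secret_name}" "${region}" "${endpoint}" \
        s3 ls "s3://${bucket}/${prefix}" --recursive --summarize \
        | awk '/Total Objects/ {print $3}')
    echo "${output}"
}

Note how the retry survives the first, empty listing: with count unset, [[ '' -gt 0 ]] is an arithmetic comparison in which bash treats the empty string as 0, so the test is false and the loop sleeps and retries instead of erroring out.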
logger.go:42: 12:26:42 | demand-backup-retry/7-kill-backup-source | starting test step 7-kill-backup-source
logger.go:42: 12:26:42 | demand-backup-retry/7-kill-backup-source | running command: [sh -c set -o errexit set -o xtrace source ../../functions pod=$(kubectl get ps-backup demand-backup-retry -n ${NAMESPACE} -o jsonpath='{.status.backupSource}' | cut -d'.' -f1) kubectl delete pod "$pod" -n ${NAMESPACE} --force --grace-period=0]
[... output of source ../../functions omitted: vars.sh exports and gdate/gsed/oc detection identical to 0-deploy-operator ...]
logger.go:42: 12:26:43 | demand-backup-retry/7-kill-backup-source | ++ kubectl get ps-backup demand-backup-retry -n kuttl-test-fine-meerkat -o 'jsonpath={.status.backupSource}'
logger.go:42: 12:26:43 | demand-backup-retry/7-kill-backup-source | ++ cut -d. -f1
logger.go:42: 12:26:43 | demand-backup-retry/7-kill-backup-source | + pod=demand-backup-retry-mysql-1
logger.go:42: 12:26:43 | demand-backup-retry/7-kill-backup-source | + kubectl delete pod demand-backup-retry-mysql-1 -n kuttl-test-fine-meerkat --force --grace-period=0
logger.go:42: 12:26:43 | demand-backup-retry/7-kill-backup-source | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 12:26:44 | demand-backup-retry/7-kill-backup-source | pod "demand-backup-retry-mysql-1" force deleted from kuttl-test-fine-meerkat namespace
logger.go:42: 12:26:48 | demand-backup-retry/7-kill-backup-source | test step completed 7-kill-backup-source
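Step 7 is the chaos injection this test is named for: .status.backupSource identifies the MySQL pod the backup is streaming from, and force-deleting it mid-upload is what should make the operator retry from another source. The traced value is FQDN-like, which is why the script keeps only the first dot-separated label; an equivalent using parameter expansion (an illustrative variant, not the suite's code):

# Assumed backupSource shape: "<pod>.<headless-service>.<namespace>" (hypothetical example)
src=$(kubectl get ps-backup demand-backup-retry -n "${NAMESPACE}" -o jsonpath='{.status.backupSource}')
pod=${src%%.*}    # same result as: cut -d'.' -f1  ->  demand-backup-retry-mysql-1
kubectl delete pod "${pod}" -n "${NAMESPACE}" --force --grace-period=0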
logger.go:42: 12:26:48 | demand-backup-retry/8-verify-backup-completion | starting test step 8-verify-backup-completion
logger.go:42: 12:26:48 | demand-backup-retry/8-verify-backup-completion | running command: [sh -c set -o errexit set -o xtrace source ../../functions kubectl wait -n ${NAMESPACE} --for=jsonpath='{.status.state}'=Succeeded \ perconaservermysqlbackups.ps.percona.com/demand-backup-retry --timeout=500s]
[... output of source ../../functions omitted: vars.sh exports and gdate/gsed/oc detection identical to 0-deploy-operator ...]
logger.go:42: 12:26:48 | demand-backup-retry/8-verify-backup-completion | + kubectl wait -n kuttl-test-fine-meerkat '--for=jsonpath={.status.state}=Succeeded' perconaservermysqlbackups.ps.percona.com/demand-backup-retry --timeout=500s
logger.go:42: 12:29:57 | demand-backup-retry/8-verify-backup-completion | perconaservermysqlbackup.ps.percona.com/demand-backup-retry condition met
logger.go:42: 12:29:57 | demand-backup-retry/8-verify-backup-completion | test step completed 8-verify-backup-completion
logger.go:42: 12:29:57 | demand-backup-retry/9-perform-restore | starting test step 9-perform-restore
logger.go:42: 12:29:58 | demand-backup-retry/9-perform-restore | PerconaServerMySQLRestore:kuttl-test-fine-meerkat/restore-of-demand-backup-retry created
logger.go:42: 12:35:33 | demand-backup-retry/9-perform-restore | test step completed 9-perform-restore
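Step 8 confirms the retried backup still reaches Succeeded (about three minutes after the pod kill), and step 9 is a plain kuttl apply whose restore object is not echoed in the log. Based on the operator's deploy/backup/restore.yaml conventions it presumably looks roughly like this; the apiVersion and field names are an assumption, not taken from this log:

kubectl apply -n "${NAMESPACE}" -f - <<EOF
apiVersion: ps.percona.com/v1alpha1   # assumed API version
kind: PerconaServerMySQLRestore
metadata:
  name: restore-of-demand-backup-retry
spec:
  clusterName: demand-backup-retry
  backupName: demand-backup-retry     # the backup created in step 5
EOF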
logger.go:42: 12:35:33 | demand-backup-retry/10-verify-data-consistency | starting test step 10-verify-data-consistency
logger.go:42: 12:35:33 | demand-backup-retry/10-verify-data-consistency | running command: [sh -c set -o errexit source ../../functions cluster_name=$(get_cluster_name) data=$(run_mysql_query_file "conf/data-query.sql" "-h $(get_haproxy_svc $(get_cluster_name))") kubectl create configmap -n "${NAMESPACE}" 10-demand-backup-retry-data --from-literal=data="${data}" # Compare saved data configmap_step04=$(kubectl get configmap 04-demand-backup-retry-data -n "${NAMESPACE}" -o json | jq -c '.data') configmap_step10=$(kubectl get configmap 10-demand-backup-retry-data -n "${NAMESPACE}" -o json | jq -c '.data') if [[ "$configmap_step04" != "$configmap_step10" ]]; then echo "Data after restore is different, check configmaps 04-demand-backup-retry-data and 10-demand-backup-retry-data" exit 1 fi]
logger.go:42: 12:35:33 | demand-backup-retry/10-verify-data-consistency | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 12:35:33 | demand-backup-retry/10-verify-data-consistency | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 12:35:36 | demand-backup-retry/10-verify-data-consistency | mysql-clienttrue
logger.go:42: 12:35:36 | demand-backup-retry/10-verify-data-consistency | ++ cat conf/data-query.sql
logger.go:42: 12:35:36 | demand-backup-retry/10-verify-data-consistency | ++ kubectl -n kuttl-test-fine-meerkat exec -i mysql-client -- bash -c 'mysql -sN -h demand-backup-retry-haproxy -uroot -p'\''TiU.^U(v-qf2^CYkVo7'\'' 2>&1'
logger.go:42: 12:35:36 | demand-backup-retry/10-verify-data-consistency | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 12:35:38 | demand-backup-retry/10-verify-data-consistency | configmap/10-demand-backup-retry-data created
logger.go:42: 12:35:39 | demand-backup-retry/10-verify-data-consistency | test step completed 10-verify-data-consistency
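Steps 4 and 10 build their MySQL endpoint from two small helpers whose bodies are not shown; from the traced kubectl call and the resulting host demand-backup-retry-haproxy they are plausibly the following (a sketch; the -haproxy suffix is inferred from the trace, not read from the helper's source):

get_cluster_name() {
    # first (and only) PerconaServerMySQL object in the test namespace
    kubectl -n "${NAMESPACE}" get ps -o 'jsonpath={.items[0].metadata.name}'
}

get_haproxy_svc() {
    local cluster=$1
    # assumption: the operator exposes the cluster via the <cluster>-haproxy Service
    echo "${cluster}-haproxy"
}

Comparing jq -c '.data' of the two configmaps reduces "same data before backup and after restore" to a single string equality, which is why step 10 must run the identical conf/data-query.sql that step 4 saved.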
logger.go:42: 12:35:39 | demand-backup-retry/97-delete-all-backups | starting test step 97-delete-all-backups
logger.go:42: 12:35:39 | demand-backup-retry/97-delete-all-backups | running command: [sh -c set -o errexit set -o xtrace source ../../functions verify_all_backups_deletion]
[... output of source ../../functions omitted: vars.sh exports and gdate/gsed/oc detection identical to 0-deploy-operator ...]
logger.go:42: 12:35:39 | demand-backup-retry/97-delete-all-backups | + verify_all_backups_deletion
logger.go:42: 12:35:39 | demand-backup-retry/97-delete-all-backups | ++ kubectl get ps-backup -n kuttl-test-fine-meerkat -o 'jsonpath={.items[*].metadata.name}'
logger.go:42: 12:35:40 | demand-backup-retry/97-delete-all-backups | + for backup in $(kubectl get ps-backup -n "$NAMESPACE" -o jsonpath='{.items[*].metadata.name}')
logger.go:42: 12:35:40 | demand-backup-retry/97-delete-all-backups | + echo
'Checking backup: demand-backup-retry' logger.go:42: 12:35:40 | demand-backup-retry/97-delete-all-backups | Checking backup: demand-backup-retry logger.go:42: 12:35:40 | demand-backup-retry/97-delete-all-backups | ++ kubectl get ps-backup demand-backup-retry -n kuttl-test-fine-meerkat -o json logger.go:42: 12:35:40 | demand-backup-retry/97-delete-all-backups | ++ jq -r $'\n\t\t\t\t\t.status.storage.type as $t |\n\t\t\t\t\t(if $t then .status.storage[$t] else {} end) as $s |\n\t\t\t\t\t"storage_type=\\($t // "")",\n\t\t\t\t\t"bucket=\\($s.bucket // $s.container // "")",\n\t\t\t\t\t"secret_name=\\($s.credentialsSecret // "")",\n\t\t\t\t\t"endpoint=\\($s.endpointUrl // "")",\n\t\t\t\t\t"region=\\($s.region // "")",\n\t\t\t\t\t"destination=\\(.status.destination // "")"\n\t\t\t\t' logger.go:42: 12:35:40 | demand-backup-retry/97-delete-all-backups | + eval $'storage_type=s3\nbucket=operator-testing\nsecret_name=minio-secret\nendpoint=http://minio-service.kuttl-test-fine-meerkat:9000\nregion=us-east-1\ndestination=s3://operator-testing/demand-backup-retry-2026-05-05-12:26:21-full' logger.go:42: 12:35:40 | demand-backup-retry/97-delete-all-backups | ++ storage_type=s3 logger.go:42: 12:35:40 | demand-backup-retry/97-delete-all-backups | ++ bucket=operator-testing logger.go:42: 12:35:40 | demand-backup-retry/97-delete-all-backups | ++ secret_name=minio-secret logger.go:42: 12:35:40 | demand-backup-retry/97-delete-all-backups | ++ endpoint=http://minio-service.kuttl-test-fine-meerkat:9000 logger.go:42: 12:35:40 | demand-backup-retry/97-delete-all-backups | ++ region=us-east-1 logger.go:42: 12:35:40 | demand-backup-retry/97-delete-all-backups | ++ destination=s3://operator-testing/demand-backup-retry-2026-05-05-12:26:21-full logger.go:42: 12:35:40 | demand-backup-retry/97-delete-all-backups | + kubectl delete ps-backup demand-backup-retry -n kuttl-test-fine-meerkat logger.go:42: 12:35:41 | demand-backup-retry/97-delete-all-backups | perconaservermysqlbackup.ps.percona.com "demand-backup-retry" deleted from kuttl-test-fine-meerkat namespace logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | + local prefix=operator-testing/demand-backup-retry-2026-05-05-12:26:21-full logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | + prefix=demand-backup-retry-2026-05-05-12:26:21-full logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | + [[ -z s3://operator-testing/demand-backup-retry-2026-05-05-12:26:21-full ]] logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | + [[ s3 == azure ]] logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | + local removed=false logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | + for i in {1..5} logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | ++ count_files_on_storage s3 operator-testing demand-backup-retry-2026-05-05-12:26:21-full minio-secret us-east-1 http://minio-service.kuttl-test-fine-meerkat:9000 logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | ++ local storage_type=s3 logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | ++ local bucket=operator-testing logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | ++ local prefix=demand-backup-retry-2026-05-05-12:26:21-full logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | ++ local secret_name=minio-secret logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | ++ local region=us-east-1 logger.go:42: 12:35:42 | 
demand-backup-retry/97-delete-all-backups | ++ local endpoint=http://minio-service.kuttl-test-fine-meerkat:9000 logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | ++ prefix=demand-backup-retry-2026-05-05-12:26:21-full/ logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | ++ case "$storage_type" in logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | ++ s3_count_files operator-testing demand-backup-retry-2026-05-05-12:26:21-full/ minio-secret us-east-1 http://minio-service.kuttl-test-fine-meerkat:9000 logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | ++ local bucket=operator-testing logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | ++ local prefix=demand-backup-retry-2026-05-05-12:26:21-full/ logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | ++ local secret_name=minio-secret logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | ++ local region=us-east-1 logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | ++ local endpoint=http://minio-service.kuttl-test-fine-meerkat:9000 logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | +++ run_s3_command minio-secret us-east-1 http://minio-service.kuttl-test-fine-meerkat:9000 s3 ls s3://operator-testing/demand-backup-retry-2026-05-05-12:26:21-full/ --recursive --summarize logger.go:42: 12:35:42 | demand-backup-retry/97-delete-all-backups | +++ awk '/Total Objects/ {print $3}' logger.go:42: 12:35:48 | demand-backup-retry/97-delete-all-backups | ++ local output=0 logger.go:42: 12:35:48 | demand-backup-retry/97-delete-all-backups | ++ echo 0 logger.go:42: 12:35:48 | demand-backup-retry/97-delete-all-backups | + [[ 0 == 0 ]] logger.go:42: 12:35:48 | demand-backup-retry/97-delete-all-backups | + echo 'Backup removed: demand-backup-retry' logger.go:42: 12:35:48 | demand-backup-retry/97-delete-all-backups | Backup removed: demand-backup-retry logger.go:42: 12:35:48 | demand-backup-retry/97-delete-all-backups | + removed=true logger.go:42: 12:35:48 | demand-backup-retry/97-delete-all-backups | + break logger.go:42: 12:35:48 | demand-backup-retry/97-delete-all-backups | + true logger.go:42: 12:35:48 | demand-backup-retry/97-delete-all-backups | test step completed 97-delete-all-backups logger.go:42: 12:35:48 | demand-backup-retry/98-drop-finalizer | starting test step 98-drop-finalizer logger.go:42: 12:35:48 | demand-backup-retry/98-drop-finalizer | PerconaServerMySQL:kuttl-test-fine-meerkat/demand-backup-retry updated logger.go:42: 12:35:48 | demand-backup-retry/98-drop-finalizer | test step completed 98-drop-finalizer logger.go:42: 12:35:48 | demand-backup-retry/99-remove-cluster-gracefully | starting test step 99-remove-cluster-gracefully logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_operator] logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | + source ../../functions logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ realpath ../../.. 
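
The cleanup logic traced in step 97 above condenses to the bash sketch below. It is reconstructed from the xtrace output, not copied from the repository: the jq program and the kubectl/awk invocations are exactly what the trace shows, but the body of count_files_on_storage, the sleep between retries, and the prefix/azure handling are simplified assumptions, and run_s3_command is assumed to wrap the aws CLI with the given secret, region, and endpoint.

    verify_all_backups_deletion() {
        local backup
        for backup in $(kubectl get ps-backup -n "${NAMESPACE}" -o jsonpath='{.items[*].metadata.name}'); do
            echo "Checking backup: ${backup}"

            # Read the storage coordinates out of the CR status *before*
            # deleting it; jq emits key=value lines, eval makes them variables.
            eval "$(kubectl get ps-backup "${backup}" -n "${NAMESPACE}" -o json | jq -r '
                .status.storage.type as $t |
                (if $t then .status.storage[$t] else {} end) as $s |
                "storage_type=\($t // "")",
                "bucket=\($s.bucket // $s.container // "")",
                "secret_name=\($s.credentialsSecret // "")",
                "endpoint=\($s.endpointUrl // "")",
                "region=\($s.region // "")",
                "destination=\(.status.destination // "")"
            ')"

            kubectl delete ps-backup "${backup}" -n "${NAMESPACE}"

            # destination looks like s3://operator-testing/<name>-<ts>-full;
            # keep only the object prefix inside the bucket.
            local prefix=${destination#*://}
            prefix=${prefix#"${bucket}"/}

            # Storage cleanup runs asynchronously via the CR finalizer, so poll.
            local removed=false i
            for i in {1..5}; do
                if [[ "$(count_files_on_storage "${storage_type}" "${bucket}" "${prefix}" \
                    "${secret_name}" "${region}" "${endpoint}")" == 0 ]]; then
                    echo "Backup removed: ${backup}"
                    removed=true
                    break
                fi
                sleep 5   # assumed delay; the trace only shows the successful first pass
            done
            ${removed}    # under `set -o errexit`, a leftover backup fails the step
        done
    }

    # Object counting for s3-type storage, per the trace: a recursive listing
    # with --summarize, keeping the number after "Total Objects".
    s3_count_files() {
        local bucket=$1 prefix=$2 secret_name=$3 region=$4 endpoint=$5
        run_s3_command "${secret_name}" "${region}" "${endpoint}" \
            s3 ls "s3://${bucket}/${prefix}" --recursive --summarize |
            awk '/Total Objects/ {print $3}'
    }

In the run above the listing under demand-backup-retry-2026-05-05-12:26:21-full/ returned 0 objects on the first attempt, so the loop broke immediately and the step passed.
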
logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | ++++ pwd logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | ++ test_name=demand-backup-retry logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/vars.sh logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup-retry/conf logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup-retry logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup-retry logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export GIT_BRANCH=PR-1238 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ GIT_BRANCH=PR-1238 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export VERSION=PR-1238-7677a7b6 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ VERSION=PR-1238-7677a7b6 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1238-7677a7b6 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1238-7677a7b6 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ [[ -z 8.4 ]] logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export MYSQL_VERSION=8.4 logger.go:42: 
12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ MYSQL_VERSION=8.4 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export IMAGE_BINLOG_SERVER=perconalab/percona-binlog-server:0.2.1 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ IMAGE_BINLOG_SERVER=perconalab/percona-binlog-server:0.2.1 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export CERT_MANAGER_VER=1.20.2 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ CERT_MANAGER_VER=1.20.2 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export MINIO_VER=5.4.0 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ MINIO_VER=5.4.0 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 12:35:49 | 
demand-backup-retry/99-remove-cluster-gracefully | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export VAULT_VER=0.16.1 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ VAULT_VER=0.16.1 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | ++++ which gdate logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | ++++ which date logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ export date=/usr/sbin/date logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ date=/usr/sbin/date logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ oc get projects logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ : logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ kubectl get nodes logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ grep '^minikube' logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ which gsed logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | +++ which sed logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | ++ sed=/usr/sbin/sed logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | + destroy_operator logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0 logger.go:42: 12:35:49 | demand-backup-retry/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 12:35:50 | demand-backup-retry/99-remove-cluster-gracefully | deployment.apps "percona-server-mysql-operator" force deleted from ps-operator namespace logger.go:42: 12:35:50 | demand-backup-retry/99-remove-cluster-gracefully | + [[ -n ps-operator ]] logger.go:42: 12:35:50 | demand-backup-retry/99-remove-cluster-gracefully | + kubectl delete namespace ps-operator --force --grace-period=0 logger.go:42: 12:35:50 | demand-backup-retry/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
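
For reference, the teardown traced by step 99 reduces to two force deletions. This is a minimal sketch assuming the operator namespace is carried in a variable (the xtrace only shows its expanded value, ps-operator). The kubectl warnings in the log are expected with --force --grace-period=0: immediate deletion returns before the resources are confirmed gone.

    destroy_operator() {
        kubectl -n "${OPERATOR_NS}" delete deployment percona-server-mysql-operator \
            --force --grace-period=0
        # Only drop the namespace when the operator runs in its own
        # namespace, i.e. when the variable is non-empty.
        if [[ -n ${OPERATOR_NS} ]]; then
            kubectl delete namespace "${OPERATOR_NS}" --force --grace-period=0
        fi
    }
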
logger.go:42: 12:35:50 | demand-backup-retry/99-remove-cluster-gracefully | namespace "ps-operator" force deleted logger.go:42: 12:36:00 | demand-backup-retry/99-remove-cluster-gracefully | test step completed 99-remove-cluster-gracefully logger.go:42: 12:36:01 | demand-backup-retry | demand-backup-retry events from ns kuttl-test-fine-meerkat: logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:01 +0000 UTC Normal Pod mysql-client Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/mysql-client to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-mttt default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:02 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:07 +0000 UTC Normal PersistentVolumeClaim minio-service WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:07 +0000 UTC Normal Deployment.apps minio-service ScalingReplicaSet Scaled up replica set minio-service-649c5b46f8 from 0 to 1 deployment-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:08 +0000 UTC Normal ReplicaSet.apps minio-service-649c5b46f8 SuccessfulCreate Created pod: minio-service-649c5b46f8-h7kf7 replicaset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:08 +0000 UTC Normal PersistentVolumeClaim minio-service ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:08 +0000 UTC Normal PersistentVolumeClaim minio-service Provisioning External provisioner is provisioning volume for claim "kuttl-test-fine-meerkat/minio-service" pd.csi.storage.gke.io_gke-3538a24387d44849857c-0d6b-3c9e-vm_8d1a9da1-ab36-4958-8d96-3d73f6a3f5d8 logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:09 +0000 UTC Normal Pod minio-service-post-job-jjtbt Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/minio-service-post-job-jjtbt to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-mttt default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:09 +0000 UTC Normal Pod minio-service-post-job-jjtbt.spec.containers{minio-make-user} Pulling Pulling image "quay.io/minio/mc:RELEASE.2024-11-21T17-21-54Z" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:09 +0000 UTC Normal Job.batch minio-service-post-job SuccessfulCreate Created pod: minio-service-post-job-jjtbt job-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:11 +0000 UTC Normal Pod minio-service-post-job-jjtbt.spec.containers{minio-make-user} Pulled Successfully pulled image "quay.io/minio/mc:RELEASE.2024-11-21T17-21-54Z" in 1.564s (1.564s including waiting). Image size: 28122288 bytes. 
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:11 +0000 UTC Normal Pod minio-service-post-job-jjtbt.spec.containers{minio-make-user} Created Created container: minio-make-user kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:11 +0000 UTC Normal Pod minio-service-post-job-jjtbt.spec.containers{minio-make-user} Started Started container minio-make-user kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:11 +0000 UTC Normal PersistentVolumeClaim minio-service ProvisioningSucceeded Successfully provisioned volume pvc-a1240e51-9a85-45c4-8284-48a65697ad7c pd.csi.storage.gke.io_gke-3538a24387d44849857c-0d6b-3c9e-vm_8d1a9da1-ab36-4958-8d96-3d73f6a3f5d8 logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:12 +0000 UTC Normal Pod minio-service-649c5b46f8-h7kf7 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/minio-service-649c5b46f8-h7kf7 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-mttt default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:16 +0000 UTC Normal Pod minio-service-649c5b46f8-h7kf7 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-a1240e51-9a85-45c4-8284-48a65697ad7c" attachdetach-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:16 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 14.909s (14.909s including waiting). Image size: 434777704 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:16 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container: mysql-client kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:17 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:23 +0000 UTC Normal Pod minio-service-649c5b46f8-h7kf7.spec.containers{minio} Pulling Pulling image "quay.io/minio/minio:RELEASE.2024-12-18T13-15-44Z" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:31 +0000 UTC Normal Pod minio-service-649c5b46f8-h7kf7.spec.containers{minio} Pulled Successfully pulled image "quay.io/minio/minio:RELEASE.2024-12-18T13-15-44Z" in 7.657s (7.657s including waiting). Image size: 62642371 bytes. 
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:31 +0000 UTC Normal Pod minio-service-649c5b46f8-h7kf7.spec.containers{minio} Created Created container: minio kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:31 +0000 UTC Normal Pod minio-service-649c5b46f8-h7kf7.spec.containers{minio} Started Started container minio kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:39 +0000 UTC Normal Job.batch minio-service-post-job Completed Job completed job-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:41 +0000 UTC Normal Pod aws-cli Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/aws-cli to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-mttt default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:41 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Pulling Pulling image "perconalab/awscli" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:44 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Pulled Successfully pulled image "perconalab/awscli" in 2.191s (2.191s including waiting). Image size: 30314917 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:44 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Created Created container: aws-cli kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:44 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Started Started container aws-cli kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:52 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-retry-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:52 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-retry-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-fine-meerkat/datadir-demand-backup-retry-mysql-0" pd.csi.storage.gke.io_gke-3538a24387d44849857c-0d6b-3c9e-vm_8d1a9da1-ab36-4958-8d96-3d73f6a3f5d8 logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:52 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-retry-mysql-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:52 +0000 UTC Normal PodDisruptionBudget.policy demand-backup-retry-mysql NoPods No matching pods found controllermanager logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:52 +0000 UTC Normal StatefulSet.apps demand-backup-retry-mysql SuccessfulCreate create Claim datadir-demand-backup-retry-mysql-0 Pod demand-backup-retry-mysql-0 in StatefulSet demand-backup-retry-mysql success statefulset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:52 +0000 UTC Normal StatefulSet.apps demand-backup-retry-mysql SuccessfulCreate create Pod demand-backup-retry-mysql-0 in StatefulSet demand-backup-retry-mysql successful statefulset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:53 +0000 UTC Normal Pod demand-backup-retry-orc-0 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/demand-backup-retry-orc-0 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-mttt default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:53 +0000 UTC Warning Pod demand-backup-retry-orc-0 FailedMount MountVolume.SetUp failed for volume "users" : secret "internal-demand-backup-retry" not found kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:53 +0000 UTC Normal StatefulSet.apps demand-backup-retry-orc SuccessfulCreate create Pod demand-backup-retry-orc-0 in StatefulSet demand-backup-retry-orc successful statefulset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:53 +0000 UTC Normal PodDisruptionBudget.policy demand-backup-retry-orchestrator NoPods No matching pods found controllermanager logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:53 +0000 UTC Normal PerconaServerMySQL.ps.percona.com demand-backup-retry ClusterStateChanged -> Initializing ps-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:54 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:54 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 139ms (139ms including waiting). Image size: 124972222 bytes. 
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:54 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:54 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:56 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-retry-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-dc19ab59-588d-43d1-9992-a3e15025b495 pd.csi.storage.gke.io_gke-3538a24387d44849857c-0d6b-3c9e-vm_8d1a9da1-ab36-4958-8d96-3d73f6a3f5d8 logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:56 +0000 UTC Normal Pod demand-backup-retry-mysql-0 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/demand-backup-retry-mysql-0 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-s5r2 default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:56 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:56 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 96ms (96ms including waiting). Image size: 73407149 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:56 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:56 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:56 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:56 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 106ms (106ms including waiting). Image size: 73407149 bytes. 
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:57 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:19:57 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:04 +0000 UTC Normal Pod demand-backup-retry-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-dc19ab59-588d-43d1-9992-a3e15025b495" attachdetach-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:06 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:06 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 127ms (127ms including waiting). Image size: 124972222 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:06 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:06 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:09 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:24 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 15.208s (15.208s including waiting). Image size: 434777704 bytes. 
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:24 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:24 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:24 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:28 +0000 UTC Normal Pod demand-backup-retry-orc-1 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/demand-backup-retry-orc-1 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-vztk default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:28 +0000 UTC Normal StatefulSet.apps demand-backup-retry-orc SuccessfulCreate create Pod demand-backup-retry-orc-1 in StatefulSet demand-backup-retry-orc successful statefulset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:29 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:29 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 116ms (116ms including waiting). Image size: 124972222 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:29 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:29 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:31 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:31 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 107ms (107ms including waiting). Image size: 73407149 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:31 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:31 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:31 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:31 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 103ms (103ms including waiting). 
Image size: 73407149 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:31 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:31 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:48 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 23.868s (23.868s including waiting). Image size: 448435717 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:48 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:48 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:48 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:49 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 583ms (583ms including waiting). Image size: 138677984 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:49 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:49 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:57 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-retry-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:57 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-retry-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:57 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-retry-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-fine-meerkat/datadir-demand-backup-retry-mysql-1" pd.csi.storage.gke.io_gke-3538a24387d44849857c-0d6b-3c9e-vm_8d1a9da1-ab36-4958-8d96-3d73f6a3f5d8 logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:57 +0000 UTC Normal StatefulSet.apps demand-backup-retry-mysql SuccessfulCreate create Claim datadir-demand-backup-retry-mysql-1 Pod demand-backup-retry-mysql-1 in StatefulSet demand-backup-retry-mysql success statefulset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:20:57 +0000 UTC Normal StatefulSet.apps demand-backup-retry-mysql SuccessfulCreate create Pod demand-backup-retry-mysql-1 in StatefulSet demand-backup-retry-mysql successful statefulset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:01 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-retry-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-bbe41303-2311-4868-8b91-4808ae935bbd pd.csi.storage.gke.io_gke-3538a24387d44849857c-0d6b-3c9e-vm_8d1a9da1-ab36-4958-8d96-3d73f6a3f5d8 logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:01 +0000 UTC Normal Pod demand-backup-retry-mysql-1 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/demand-backup-retry-mysql-1 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-vztk default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:03 +0000 UTC Normal Pod demand-backup-retry-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/demand-backup-retry-haproxy-0 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-mttt default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:03 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:03 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 127ms (127ms including waiting). Image size: 124972222 bytes. 
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:03 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:03 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:03 +0000 UTC Normal PodDisruptionBudget.policy demand-backup-retry-haproxy NoPods No matching pods found controllermanager logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:03 +0000 UTC Normal StatefulSet.apps demand-backup-retry-haproxy SuccessfulCreate create Pod demand-backup-retry-haproxy-0 in StatefulSet demand-backup-retry-haproxy successful statefulset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:03 +0000 UTC Normal Pod demand-backup-retry-orc-2 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/demand-backup-retry-orc-2 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-s5r2 default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:03 +0000 UTC Normal StatefulSet.apps demand-backup-retry-orc SuccessfulCreate create Pod demand-backup-retry-orc-2 in StatefulSet demand-backup-retry-orc successful statefulset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:04 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:04 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 121ms (121ms including waiting). Image size: 124972222 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:04 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:04 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:05 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:05 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 110ms (110ms including waiting). Image size: 103553241 bytes. 
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:05 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:05 +0000 UTC Normal Pod demand-backup-retry-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-bbe41303-2311-4868-8b91-4808ae935bbd" attachdetach-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:06 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:06 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:06 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 134ms (134ms including waiting). Image size: 103553241 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:06 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:06 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:07 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:07 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 149ms (149ms including waiting). Image size: 124972222 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:07 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:07 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:09 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:09 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:09 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 108ms (108ms including waiting). Image size: 73407149 bytes. 
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:09 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:09 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:09 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:09 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 94ms (94ms including waiting). Image size: 73407149 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:09 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:09 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:22 +0000 UTC Warning Pod demand-backup-retry-haproxy-0.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:24 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 14.682s (14.682s including waiting). Image size: 434777704 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:24 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:24 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:24 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:47 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 22.853s (22.853s including waiting). Image size: 448435717 bytes. 
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:47 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:47 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:47 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:47 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 117ms (117ms including waiting). Image size: 138677984 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:47 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:47 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:57 +0000 UTC Warning Pod demand-backup-retry-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2026/05/05 12:21:57 Waiting for MySQL ready state 2026/05/05 12:21:57 MySQL is ready 2026/05/05 12:21:57 Peers: [3262333139303237.demand-backup-retry-mysql-unready.kuttl-test-fine-meerkat 3339623635303832.demand-backup-retry-mysql-unready.kuttl-test-fine-meerkat] 2026/05/05 12:21:57 FQDN: demand-backup-retry-mysql-1.demand-backup-retry-mysql.kuttl-test-fine-meerkat 2026/05/05 12:21:57 Primary: demand-backup-retry-mysql-0.demand-backup-retry-mysql.kuttl-test-fine-meerkat Replicas: [demand-backup-retry-mysql-1.demand-backup-retry-mysql.kuttl-test-fine-meerkat] 2026/05/05 12:21:57 lookup demand-backup-retry-mysql-1 [10.218.153.16] 2026/05/05 12:21:57 PodIP: 10.218.153.16 2026/05/05 12:21:57 lookup demand-backup-retry-mysql-0.demand-backup-retry-mysql.kuttl-test-fine-meerkat [10.218.152.10] 2026/05/05 12:21:57 PrimaryIP: 10.218.152.10 2026/05/05 12:21:57 Donor: demand-backup-retry-mysql-0.demand-backup-retry-mysql.kuttl-test-fine-meerkat 2026/05/05 12:21:57 Opening connection to 10.218.153.16 2026/05/05 12:21:57 Clone required: true 2026/05/05 12:21:57 Checking if a clone in progress 2026/05/05 12:21:57 Clone in progress: false 2026/05/05 12:21:57 Cloning from demand-backup-retry-mysql-0.demand-backup-retry-mysql.kuttl-test-fine-meerkat 2026/05/05 12:21:57 Clone finished. Restarting container... kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:21:57 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:04 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 92ms (92ms including waiting). Image size: 434777704 bytes. 
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:33 +0000 UTC Warning Pod demand-backup-retry-haproxy-0.spec.containers{haproxy} Unhealthy Liveness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:37 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-retry-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:37 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-retry-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:37 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-retry-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-fine-meerkat/datadir-demand-backup-retry-mysql-2" pd.csi.storage.gke.io_gke-3538a24387d44849857c-0d6b-3c9e-vm_8d1a9da1-ab36-4958-8d96-3d73f6a3f5d8 logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:37 +0000 UTC Normal StatefulSet.apps demand-backup-retry-mysql SuccessfulCreate create Claim datadir-demand-backup-retry-mysql-2 Pod demand-backup-retry-mysql-2 in StatefulSet demand-backup-retry-mysql success statefulset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:37 +0000 UTC Normal StatefulSet.apps demand-backup-retry-mysql SuccessfulCreate create Pod demand-backup-retry-mysql-2 in StatefulSet demand-backup-retry-mysql successful statefulset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:41 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-retry-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-1c677b79-7241-49d9-b5d5-f0148baf2df4 pd.csi.storage.gke.io_gke-3538a24387d44849857c-0d6b-3c9e-vm_8d1a9da1-ab36-4958-8d96-3d73f6a3f5d8 logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:42 +0000 UTC Normal Pod demand-backup-retry-mysql-2 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/demand-backup-retry-mysql-2 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-mttt default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:43 +0000 UTC Normal Pod demand-backup-retry-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/demand-backup-retry-haproxy-1 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-s5r2 default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:43 +0000 UTC Normal StatefulSet.apps demand-backup-retry-haproxy SuccessfulCreate create Pod demand-backup-retry-haproxy-1 in StatefulSet demand-backup-retry-haproxy successful statefulset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:44 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:44 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" 
in 146ms (146ms including waiting). Image size: 124972222 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:44 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:44 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:46 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:46 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 109ms (109ms including waiting). Image size: 103553241 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:46 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:46 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:46 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:46 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 110ms (110ms including waiting). Image size: 103553241 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:46 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:46 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:46 +0000 UTC Normal Pod demand-backup-retry-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-1c677b79-7241-49d9-b5d5-f0148baf2df4" attachdetach-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:47 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:48 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 149ms (149ms including waiting). Image size: 124972222 bytes. 
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:48 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:48 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:49 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:50 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 93ms (93ms including waiting). Image size: 434777704 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:50 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:50 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:22:50 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:03 +0000 UTC Normal Pod demand-backup-retry-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/demand-backup-retry-haproxy-2 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-vztk default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:03 +0000 UTC Normal StatefulSet.apps demand-backup-retry-haproxy SuccessfulCreate create Pod demand-backup-retry-haproxy-2 in StatefulSet demand-backup-retry-haproxy successful statefulset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:04 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:04 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 153ms (153ms including waiting). Image size: 124972222 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:04 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:04 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:04 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 14.657s (14.657s including waiting). Image size: 448435717 bytes. 
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:04 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:04 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:04 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:05 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 218ms (218ms including waiting). Image size: 138677984 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:05 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:05 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:06 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:06 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 106ms (106ms including waiting). Image size: 103553241 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:06 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:06 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:06 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:06 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 105ms (105ms including waiting). Image size: 103553241 bytes. 
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:06 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:07 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:08 +0000 UTC Warning Pod demand-backup-retry-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2026/05/05 12:23:08 Waiting for MySQL ready state 2026/05/05 12:23:08 MySQL is ready 2026/05/05 12:23:08 Peers: [3162633438383535.demand-backup-retry-mysql-unready.kuttl-test-fine-meerkat 3262333139303237.demand-backup-retry-mysql-unready.kuttl-test-fine-meerkat 3339623635303832.demand-backup-retry-mysql-unready.kuttl-test-fine-meerkat] 2026/05/05 12:23:08 FQDN: demand-backup-retry-mysql-2.demand-backup-retry-mysql.kuttl-test-fine-meerkat 2026/05/05 12:23:08 Primary: demand-backup-retry-mysql-0.demand-backup-retry-mysql.kuttl-test-fine-meerkat Replicas: [demand-backup-retry-mysql-1.demand-backup-retry-mysql.kuttl-test-fine-meerkat demand-backup-retry-mysql-2.demand-backup-retry-mysql.kuttl-test-fine-meerkat] 2026/05/05 12:23:08 lookup demand-backup-retry-mysql-2 [10.218.154.19] 2026/05/05 12:23:08 PodIP: 10.218.154.19 2026/05/05 12:23:08 lookup demand-backup-retry-mysql-0.demand-backup-retry-mysql.kuttl-test-fine-meerkat [10.218.152.10] 2026/05/05 12:23:08 PrimaryIP: 10.218.152.10 2026/05/05 12:23:08 Donor: demand-backup-retry-mysql-1.demand-backup-retry-mysql.kuttl-test-fine-meerkat 2026/05/05 12:23:08 Opening connection to 10.218.154.19 2026/05/05 12:23:08 Clone required: true 2026/05/05 12:23:08 Checking if a clone in progress 2026/05/05 12:23:08 Clone in progress: false 2026/05/05 12:23:08 Cloning from demand-backup-retry-mysql-1.demand-backup-retry-mysql.kuttl-test-fine-meerkat 2026/05/05 12:23:08 Clone finished. Restarting container... kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:08 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:17 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 115ms (115ms including waiting). Image size: 434777704 bytes. 
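The startup-probe "failure" above is the bootstrap process, not MySQL itself, reporting: it detected that the new pod needs a data clone from a donor (demand-backup-retry-mysql-1), ran the clone, and requested a container restart, so the Killing event that follows is the expected hand-off rather than a crash. A sketch of watching the same clone from inside the pod, assuming the MySQL clone plugin's status table and a usable root credential ($ROOT_PASSWORD is an assumption, not taken from this log):

# Check clone progress via the clone plugin's status table (assumed credentials)
kubectl -n kuttl-test-fine-meerkat exec demand-backup-retry-mysql-2 -c mysql -- \
  mysql -h127.0.0.1 -uroot -p"$ROOT_PASSWORD" \
  -e 'SELECT STATE, ERROR_MESSAGE FROM performance_schema.clone_status'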
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:51 +0000 UTC Normal PerconaServerMySQL.ps.percona.com demand-backup-retry ClusterStateChanged Initializing -> Ready ps-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:54 +0000 UTC Normal Pod sysbench-data-tlm2g Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/sysbench-data-tlm2g to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-mttt default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:54 +0000 UTC Normal Job.batch sysbench-data SuccessfulCreate Created pod: sysbench-data-tlm2g job-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:23:55 +0000 UTC Normal Pod sysbench-data-tlm2g.spec.containers{sysbench} Pulling Pulling image "perconalab/percona-server-mysql-operator:sysbench" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:24:01 +0000 UTC Normal Pod sysbench-data-tlm2g.spec.containers{sysbench} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:sysbench" in 6.298s (6.298s including waiting). Image size: 240343061 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:24:01 +0000 UTC Normal Pod sysbench-data-tlm2g.spec.containers{sysbench} Created Created container: sysbench kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:24:01 +0000 UTC Normal Pod sysbench-data-tlm2g.spec.containers{sysbench} Started Started container sysbench kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:25:34 +0000 UTC Normal PerconaServerMySQL.ps.percona.com demand-backup-retry AsyncReplicationNotReady demand-backup-retry-mysql-2: [replication_lag] ps-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:25:34 +0000 UTC Normal PerconaServerMySQL.ps.percona.com demand-backup-retry ClusterStateChanged Ready -> Initializing ps-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:25:53 +0000 UTC Normal Job.batch sysbench-data Completed Job completed job-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:21 +0000 UTC Normal Pod xb-demand-backup-retry-minio-6k665 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/xb-demand-backup-retry-minio-6k665 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-mttt default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:21 +0000 UTC Normal Job.batch xb-demand-backup-retry-minio SuccessfulCreate Created pod: xb-demand-backup-retry-minio-6k665 job-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:22 +0000 UTC Normal Pod xb-demand-backup-retry-minio-6k665.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:22 +0000 UTC Normal Pod xb-demand-backup-retry-minio-6k665.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 132ms (132ms including waiting). Image size: 124972222 bytes. 
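The Ready -> Initializing flip at 12:25:34 is driven by the AsyncReplicationNotReady condition ([replication_lag]) while the sysbench job loads data; it clears on its own once the replica catches up. One way to follow these transitions, assuming the ps short name the operator's CRD registers:

# Stream the cluster state field as the controller updates it
kubectl -n kuttl-test-fine-meerkat get ps demand-backup-retry \
  -o jsonpath='{.status.state}{"\n"}' --watch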
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:22 +0000 UTC Normal Pod xb-demand-backup-retry-minio-6k665.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:22 +0000 UTC Normal Pod xb-demand-backup-retry-minio-6k665.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:24 +0000 UTC Normal Pod xb-demand-backup-retry-minio-6k665.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:24 +0000 UTC Normal Pod xb-demand-backup-retry-minio-6k665.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 104ms (104ms including waiting). Image size: 448435717 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:24 +0000 UTC Normal Pod xb-demand-backup-retry-minio-6k665.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:24 +0000 UTC Normal Pod xb-demand-backup-retry-minio-6k665.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:26 +0000 UTC Normal Pod aws-cli-1777983985791949244 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/aws-cli-1777983985791949244 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-s5r2 default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:26 +0000 UTC Normal Pod aws-cli-1777983985791949244.spec.containers{aws-cli-1777983985791949244} Pulling Pulling image "perconalab/awscli" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:28 +0000 UTC Normal Pod aws-cli-1777983985791949244.spec.containers{aws-cli-1777983985791949244} Pulled Successfully pulled image "perconalab/awscli" in 2.038s (2.038s including waiting). Image size: 30314917 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:28 +0000 UTC Normal Pod aws-cli-1777983985791949244.spec.containers{aws-cli-1777983985791949244} Created Created container: aws-cli-1777983985791949244 kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:28 +0000 UTC Normal Pod aws-cli-1777983985791949244.spec.containers{aws-cli-1777983985791949244} Started Started container aws-cli-1777983985791949244 kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:39 +0000 UTC Normal Pod aws-cli-1777983999115462900 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/aws-cli-1777983999115462900 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-s5r2 default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:39 +0000 UTC Normal Pod aws-cli-1777983999115462900.spec.containers{aws-cli-1777983999115462900} Pulling Pulling image "perconalab/awscli" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:40 +0000 UTC Normal Pod aws-cli-1777983999115462900.spec.containers{aws-cli-1777983999115462900} Pulled Successfully pulled image "perconalab/awscli" in 95ms (95ms including waiting). Image size: 30314917 bytes. 
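The two short-lived aws-cli pods are the test verifying that the backup objects actually landed in object storage. A rough hand-run equivalent; the minio-service endpoint and operator-testing bucket are assumptions about the test's minio deployment, and the S3 credentials the test injects from a secret are omitted here:

# List backup objects in the (assumed) minio bucket
kubectl -n kuttl-test-fine-meerkat run aws-cli --rm -i --restart=Never \
  --image=perconalab/awscli -- \
  aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing --recursive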
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:40 +0000 UTC Normal Pod aws-cli-1777983999115462900.spec.containers{aws-cli-1777983999115462900} Created Created container: aws-cli-1777983999115462900 kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:40 +0000 UTC Normal Pod aws-cli-1777983999115462900.spec.containers{aws-cli-1777983999115462900} Started Started container aws-cli-1777983999115462900 kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:44 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:44 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:44 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:44 +0000 UTC Normal Pod demand-backup-retry-mysql-1 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/demand-backup-retry-mysql-1 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-vztk default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:44 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:44 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 143ms (143ms including waiting). Image size: 124972222 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:44 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:45 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:47 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:47 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 137ms (137ms including waiting). Image size: 434777704 bytes. 
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:47 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:47 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:47 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:47 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 228ms (228ms including waiting). Image size: 448435717 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:47 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:47 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:47 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:47 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 102ms (102ms including waiting). Image size: 138677984 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:47 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:47 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:56 +0000 UTC Normal Pod xb-demand-backup-retry-minio-8j7q6 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/xb-demand-backup-retry-minio-8j7q6 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-mttt default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:56 +0000 UTC Normal Pod xb-demand-backup-retry-minio-8j7q6.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:56 +0000 UTC Normal Pod xb-demand-backup-retry-minio-8j7q6.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 129ms (129ms including waiting). Image size: 124972222 bytes. 
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:56 +0000 UTC Normal Pod xb-demand-backup-retry-minio-8j7q6.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:56 +0000 UTC Normal Pod xb-demand-backup-retry-minio-8j7q6.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:56 +0000 UTC Normal Job.batch xb-demand-backup-retry-minio SuccessfulCreate Created pod: xb-demand-backup-retry-minio-8j7q6 job-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:58 +0000 UTC Normal Pod xb-demand-backup-retry-minio-8j7q6.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:58 +0000 UTC Normal Pod xb-demand-backup-retry-minio-8j7q6.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 103ms (103ms including waiting). Image size: 448435717 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:58 +0000 UTC Normal Pod xb-demand-backup-retry-minio-8j7q6.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:26:58 +0000 UTC Normal Pod xb-demand-backup-retry-minio-8j7q6.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:27:09 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 246ms (246ms including waiting). Image size: 138677984 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:27:19 +0000 UTC Normal Pod xb-demand-backup-retry-minio-psg57 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/xb-demand-backup-retry-minio-psg57 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-mttt default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:27:19 +0000 UTC Normal Pod xb-demand-backup-retry-minio-psg57.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:27:19 +0000 UTC Normal Pod xb-demand-backup-retry-minio-psg57.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 170ms (170ms including waiting). Image size: 124972222 bytes. 
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:27:19 +0000 UTC Normal Pod xb-demand-backup-retry-minio-psg57.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:27:19 +0000 UTC Normal Pod xb-demand-backup-retry-minio-psg57.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:27:19 +0000 UTC Normal Job.batch xb-demand-backup-retry-minio SuccessfulCreate Created pod: xb-demand-backup-retry-minio-psg57 job-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:27:20 +0000 UTC Warning Pod demand-backup-retry-mysql-1.spec.containers{pt-heartbeat} BackOff Back-off restarting failed container pt-heartbeat in pod demand-backup-retry-mysql-1_kuttl-test-fine-meerkat(36109a51-eead-4923-bea7-b47ccd1c4dcb) kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:27:21 +0000 UTC Normal Pod xb-demand-backup-retry-minio-psg57.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:27:21 +0000 UTC Normal Pod xb-demand-backup-retry-minio-psg57.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 123ms (123ms including waiting). Image size: 448435717 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:27:21 +0000 UTC Normal Pod xb-demand-backup-retry-minio-psg57.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:27:21 +0000 UTC Normal Pod xb-demand-backup-retry-minio-psg57.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:27:30 +0000 UTC Warning Pod demand-backup-retry-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2026/05/05 12:27:04 Waiting for MySQL ready state 2026/05/05 12:27:04 MySQL is ready 2026/05/05 12:27:04 Peers: [3162633438383535.demand-backup-retry-mysql-unready.kuttl-test-fine-meerkat 3339623635303832.demand-backup-retry-mysql-unready.kuttl-test-fine-meerkat 6266386366653231.demand-backup-retry-mysql-unready.kuttl-test-fine-meerkat] 2026/05/05 12:27:04 FQDN: demand-backup-retry-mysql-1.demand-backup-retry-mysql.kuttl-test-fine-meerkat 2026/05/05 12:27:04 Primary: demand-backup-retry-mysql-0.demand-backup-retry-mysql.kuttl-test-fine-meerkat Replicas: [demand-backup-retry-mysql-1.demand-backup-retry-mysql.kuttl-test-fine-meerkat demand-backup-retry-mysql-2.demand-backup-retry-mysql.kuttl-test-fine-meerkat] 2026/05/05 12:27:04 lookup demand-backup-retry-mysql-1 [10.218.153.18] 2026/05/05 12:27:04 PodIP: 10.218.153.18 2026/05/05 12:27:04 lookup demand-backup-retry-mysql-0.demand-backup-retry-mysql.kuttl-test-fine-meerkat [10.218.152.10] 2026/05/05 12:27:04 PrimaryIP: 10.218.152.10 2026/05/05 12:27:04 Donor: demand-backup-retry-mysql-2.demand-backup-retry-mysql.kuttl-test-fine-meerkat 2026/05/05 12:27:04 Opening connection to 10.218.153.18 2026/05/05 12:27:04 Clone required: true 2026/05/05 12:27:04 Checking if a clone in progress 2026/05/05 12:27:04 Clone in progress: false 2026/05/05 12:27:04 Cloning from demand-backup-retry-mysql-2.demand-backup-retry-mysql.kuttl-test-fine-meerkat 2026/05/05 12:27:30 Clone finished. Restarting container... 
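A BackOff on a freshly started pt-heartbeat is common while mysql in the same pod is still cloning: the heartbeat client typically exits while the server is not yet accepting connections, and the kubelet throttles its restarts. The previous container log usually confirms it with a plain connection error:

# Read the log of the exited pt-heartbeat container instance
kubectl -n kuttl-test-fine-meerkat logs demand-backup-retry-mysql-1 -c pt-heartbeat --previous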
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:27:30 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:27:33 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 110ms (110ms including waiting). Image size: 434777704 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:28:01 +0000 UTC Normal Pod xb-demand-backup-retry-minio-2lz8c Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/xb-demand-backup-retry-minio-2lz8c to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-mttt default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:28:01 +0000 UTC Normal Pod xb-demand-backup-retry-minio-2lz8c.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:28:01 +0000 UTC Normal Pod xb-demand-backup-retry-minio-2lz8c.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 124ms (124ms including waiting). Image size: 124972222 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:28:01 +0000 UTC Normal Pod xb-demand-backup-retry-minio-2lz8c.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:28:01 +0000 UTC Normal Pod xb-demand-backup-retry-minio-2lz8c.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:28:01 +0000 UTC Normal Job.batch xb-demand-backup-retry-minio SuccessfulCreate Created pod: xb-demand-backup-retry-minio-2lz8c job-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:28:03 +0000 UTC Normal Pod xb-demand-backup-retry-minio-2lz8c.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:28:03 +0000 UTC Normal Pod xb-demand-backup-retry-minio-2lz8c.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 99ms (99ms including waiting). Image size: 448435717 bytes. 
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:28:03 +0000 UTC Normal Pod xb-demand-backup-retry-minio-2lz8c.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:28:03 +0000 UTC Normal Pod xb-demand-backup-retry-minio-2lz8c.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:23 +0000 UTC Normal Pod xb-demand-backup-retry-minio-w4877 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/xb-demand-backup-retry-minio-w4877 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-mttt default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:23 +0000 UTC Normal Pod xb-demand-backup-retry-minio-w4877.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:23 +0000 UTC Normal Pod xb-demand-backup-retry-minio-w4877.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 133ms (133ms including waiting). Image size: 124972222 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:23 +0000 UTC Normal Pod xb-demand-backup-retry-minio-w4877.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:23 +0000 UTC Normal Job.batch xb-demand-backup-retry-minio SuccessfulCreate Created pod: xb-demand-backup-retry-minio-w4877 job-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:24 +0000 UTC Normal Pod xb-demand-backup-retry-minio-w4877.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:25 +0000 UTC Normal Pod xb-demand-backup-retry-minio-w4877.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:25 +0000 UTC Normal Pod xb-demand-backup-retry-minio-w4877.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 149ms (149ms including waiting). Image size: 448435717 bytes. 
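Five xb-demand-backup-retry-minio pods (-6k665, -8j7q6, -psg57, -2lz8c, -w4877) in under four minutes are the behaviour this test exercises: the backup Job keeps spawning replacement pods until one attempt succeeds, which the 12:29:55 Completed event just below confirms. The Job's own counters summarize the retries:

# Compare failed vs. succeeded attempts on the backup Job
kubectl -n kuttl-test-fine-meerkat get job xb-demand-backup-retry-minio \
  -o jsonpath='{.status.failed} failed, {.status.succeeded} succeeded{"\n"}'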
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:25 +0000 UTC Normal Pod xb-demand-backup-retry-minio-w4877.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:25 +0000 UTC Normal Pod xb-demand-backup-retry-minio-w4877.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:55 +0000 UTC Normal Job.batch xb-demand-backup-retry-minio Completed Job completed job-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:58 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:58 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:58 +0000 UTC Warning Pod demand-backup-retry-haproxy-2.spec.containers{haproxy} Unhealthy Readiness probe errored: rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task c100db37551ce550bdb247304bb88b1cd61381244a013b5ac3d2f16bd65a4065 not found: not found kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:58 +0000 UTC Normal StatefulSet.apps demand-backup-retry-haproxy SuccessfulDelete delete Pod demand-backup-retry-haproxy-2 in StatefulSet demand-backup-retry-haproxy successful statefulset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:58 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:58 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:58 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:58 +0000 UTC Normal StatefulSet.apps demand-backup-retry-mysql SuccessfulDelete delete Pod demand-backup-retry-mysql-2 in StatefulSet demand-backup-retry-mysql successful statefulset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:58 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:58 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:58 +0000 UTC Normal StatefulSet.apps demand-backup-retry-orc SuccessfulDelete delete Pod demand-backup-retry-orc-2 in StatefulSet demand-backup-retry-orc successful statefulset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:58 +0000 UTC Normal PerconaServerMySQL.ps.percona.com demand-backup-retry ClusterStateChanged Ready -> Stopping ps-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:59 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:59 +0000 UTC Normal Pod 
demand-backup-retry-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:59 +0000 UTC Warning Pod demand-backup-retry-haproxy-1.spec.containers{haproxy} Unhealthy Readiness probe errored: rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 9d4993152626ec8a8a16c4ba71042f7b800b48a54a9765c609618d7a0f0e47cb not found: not found kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:59 +0000 UTC Normal StatefulSet.apps demand-backup-retry-haproxy SuccessfulDelete delete Pod demand-backup-retry-haproxy-1 in StatefulSet demand-backup-retry-haproxy successful statefulset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:59 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:59 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:29:59 +0000 UTC Normal StatefulSet.apps demand-backup-retry-orc SuccessfulDelete delete Pod demand-backup-retry-orc-1 in StatefulSet demand-backup-retry-orc successful statefulset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:00 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:00 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:00 +0000 UTC Warning Pod demand-backup-retry-haproxy-0.spec.containers{haproxy} Unhealthy Readiness probe errored: rpc error: code = Unknown desc = failed to exec in container: failed to create exec "b5bb7c0d89d935b0ffb509dde19b0318678f226c5c821a2cd8ba4f4f9d3ba1dd": cannot exec in a stopped state: unknown kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:00 +0000 UTC Normal StatefulSet.apps demand-backup-retry-haproxy SuccessfulDelete delete Pod demand-backup-retry-haproxy-0 in StatefulSet demand-backup-retry-haproxy successful statefulset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:00 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:00 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:00 +0000 UTC Normal StatefulSet.apps demand-backup-retry-orc SuccessfulDelete delete Pod demand-backup-retry-orc-0 in StatefulSet demand-backup-retry-orc successful statefulset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:02 +0000 UTC Normal StatefulSet.apps demand-backup-retry-mysql SuccessfulDelete delete Pod demand-backup-retry-mysql-1 in StatefulSet demand-backup-retry-mysql successful statefulset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:05 +0000 UTC Warning Pod demand-backup-retry-mysql-1.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 12:30:05 MySQL state is not ready... 
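The Ready -> Stopping transition above (and Stopping -> Paused just below) is the operator quiescing the whole cluster before the restore; the readiness-probe rpc errors in this window are probes racing containers that are already exiting, not real failures. The same pause can be requested directly, assuming the spec.pause field the PerconaServerMySQL CR exposes:

# Ask the operator to suspend the cluster (assumed spec.pause field)
kubectl -n kuttl-test-fine-meerkat patch ps demand-backup-retry \
  --type merge -p '{"spec":{"pause":true}}'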
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:06 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:06 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:06 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:06 +0000 UTC Normal StatefulSet.apps demand-backup-retry-mysql SuccessfulDelete delete Pod demand-backup-retry-mysql-0 in StatefulSet demand-backup-retry-mysql successful statefulset-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:07 +0000 UTC Warning Pod demand-backup-retry-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 12:30:07 MySQL state is not ready... kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:09 +0000 UTC Normal PerconaServerMySQL.ps.percona.com demand-backup-retry ClusterStateChanged Stopping -> Paused ps-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:12 +0000 UTC Warning Pod demand-backup-retry-mysql-0.spec.containers{mysql} Unhealthy Readiness probe errored: rpc error: code = Unknown desc = failed to exec in container: container is in CONTAINER_EXITED state kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:28 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-retry-r8jvm Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/xb-restore-restore-of-demand-backup-retry-r8jvm to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-mttt default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:28 +0000 UTC Normal Job.batch xb-restore-restore-of-demand-backup-retry SuccessfulCreate Created pod: xb-restore-restore-of-demand-backup-retry-r8jvm job-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:36 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-retry-r8jvm SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-dc19ab59-588d-43d1-9992-a3e15025b495" attachdetach-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:38 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-retry-r8jvm.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:38 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-retry-r8jvm.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 129ms (129ms including waiting). Image size: 124972222 bytes. 
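The xb-restore-restore-of-demand-backup-retry Job is created by the restore controller from a PerconaServerMySQLRestore object. A minimal sketch of what such an object could look like; the apiVersion and spec field names are assumptions based on the CR kind in these events, and the backup name is inferred from the xb-demand-backup-retry-minio Job above, so check both against the operator's deploy/ CRDs before use:

# Hypothetical restore object; all field names are assumptions
kubectl -n kuttl-test-fine-meerkat apply -f - <<'EOF'
apiVersion: ps.percona.com/v1alpha1
kind: PerconaServerMySQLRestore
metadata:
  name: restore-of-demand-backup-retry
spec:
  clusterName: demand-backup-retry
  backupName: demand-backup-retry-minio
EOF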
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:38 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-retry-r8jvm.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:38 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-retry-r8jvm.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:39 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-retry-r8jvm.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:40 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-retry-r8jvm.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 207ms (207ms including waiting). Image size: 448435717 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:40 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-retry-r8jvm.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:30:40 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-retry-r8jvm.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:10 +0000 UTC Normal Pod demand-backup-retry-mysql-0 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/demand-backup-retry-mysql-0 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-s5r2 default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:10 +0000 UTC Warning Pod demand-backup-retry-mysql-0 FailedAttachVolume Multi-Attach error for volume "pvc-dc19ab59-588d-43d1-9992-a3e15025b495" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:10 +0000 UTC Normal Pod demand-backup-retry-orc-0 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/demand-backup-retry-orc-0 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-mttt default-scheduler logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:10 +0000 UTC Normal PerconaServerMySQL.ps.percona.com demand-backup-retry ClusterStateChanged Paused -> Initializing ps-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:10 +0000 UTC Normal Job.batch xb-restore-restore-of-demand-backup-retry Completed Job completed job-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:11 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:11 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 139ms (139ms including waiting). Image size: 124972222 bytes. 
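The Multi-Attach warning is transient: the PD volume was still attached to the restore pod's node when demand-backup-retry-mysql-0 was scheduled, and the SuccessfulAttachVolume event at 12:31:24 just below shows it resolving once the old attachment is released. Cluster-scoped VolumeAttachment objects show which node currently holds a volume:

# Find the node that still holds the contested volume
kubectl get volumeattachments | grep pvc-dc19ab59-588d-43d1-9992-a3e15025b495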
kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:11 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:11 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:13 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:13 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 114ms (114ms including waiting). Image size: 73407149 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:13 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:13 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:13 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:13 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 109ms (109ms including waiting). Image size: 73407149 bytes. kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:13 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:13 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:24 +0000 UTC Normal Pod demand-backup-retry-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-dc19ab59-588d-43d1-9992-a3e15025b495" attachdetach-controller logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:25 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:25 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 127ms (127ms including waiting). Image size: 124972222 bytes. 
kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:25 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:25 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:28 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:28 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 120ms (120ms including waiting). Image size: 434777704 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:28 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:28 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:28 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:41 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 12.859s (12.859s including waiting). Image size: 449692550 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:41 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:41 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:41 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:45 +0000 UTC Normal Pod demand-backup-retry-orc-1 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/demand-backup-retry-orc-1 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-vztk default-scheduler
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:45 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:45 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 136ms (136ms including waiting). Image size: 124972222 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:45 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:45 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:47 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 5.764s (5.764s including waiting). Image size: 138673481 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:47 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:47 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:47 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:48 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 1.229s (1.229s including waiting). Image size: 73406442 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:48 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:48 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:48 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:48 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 104ms (104ms including waiting). Image size: 73406442 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:49 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:31:49 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:01 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-retry-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:01 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-retry-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:01 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-retry-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-fine-meerkat/datadir-demand-backup-retry-mysql-1" pd.csi.storage.gke.io_gke-3538a24387d44849857c-0d6b-3c9e-vm_8d1a9da1-ab36-4958-8d96-3d73f6a3f5d8
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:02 +0000 UTC Normal Pod demand-backup-retry-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/demand-backup-retry-haproxy-0 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-mttt default-scheduler
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:03 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:03 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 124ms (124ms including waiting). Image size: 124972222 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:03 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:03 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:05 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-retry-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-512ed5e2-d2a0-49a1-ab4b-b161d048f101 pd.csi.storage.gke.io_gke-3538a24387d44849857c-0d6b-3c9e-vm_8d1a9da1-ab36-4958-8d96-3d73f6a3f5d8
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:05 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:05 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 98ms (98ms including waiting). Image size: 103553241 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:05 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:05 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:05 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:05 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 109ms (109ms including waiting). Image size: 103553241 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:05 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:05 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:05 +0000 UTC Normal Pod demand-backup-retry-mysql-1 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/demand-backup-retry-mysql-1 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-vztk default-scheduler
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:09 +0000 UTC Normal Pod demand-backup-retry-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-512ed5e2-d2a0-49a1-ab4b-b161d048f101" attachdetach-controller
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:11 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:11 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 144ms (144ms including waiting). Image size: 124972222 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:11 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:11 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:13 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:13 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 103ms (103ms including waiting). Image size: 434777704 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:13 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:13 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:13 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:20 +0000 UTC Normal Pod demand-backup-retry-orc-2 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/demand-backup-retry-orc-2 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-s5r2 default-scheduler
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:21 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:21 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 133ms (133ms including waiting). Image size: 124972222 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:21 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:21 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:22 +0000 UTC Warning Pod demand-backup-retry-haproxy-0.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:23 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:24 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 1.495s (1.495s including waiting). Image size: 73406442 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:24 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:24 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:24 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:25 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 107ms (107ms including waiting). Image size: 73406442 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:25 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:25 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:29 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 15.425s (15.425s including waiting). Image size: 449692550 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:29 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:29 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:29 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:32 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 2.888s (2.888s including waiting). Image size: 138673481 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:32 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:32 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:47 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 529ms (529ms including waiting). Image size: 138673481 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:32:58 +0000 UTC Warning Pod demand-backup-retry-mysql-1.spec.containers{pt-heartbeat} BackOff Back-off restarting failed container pt-heartbeat in pod demand-backup-retry-mysql-1_kuttl-test-fine-meerkat(f9549a79-94e6-4f01-83f2-d7314a86147c) kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:33:09 +0000 UTC Warning Pod demand-backup-retry-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2026/05/05 12:32:41 Waiting for MySQL ready state 2026/05/05 12:32:41 MySQL is ready 2026/05/05 12:32:41 Peers: [6130613737383363.demand-backup-retry-mysql-unready.kuttl-test-fine-meerkat 6330316432313637.demand-backup-retry-mysql-unready.kuttl-test-fine-meerkat] 2026/05/05 12:32:41 FQDN: demand-backup-retry-mysql-1.demand-backup-retry-mysql.kuttl-test-fine-meerkat 2026/05/05 12:32:41 Primary: demand-backup-retry-mysql-0.demand-backup-retry-mysql.kuttl-test-fine-meerkat Replicas: [demand-backup-retry-mysql-1.demand-backup-retry-mysql.kuttl-test-fine-meerkat] 2026/05/05 12:32:41 lookup demand-backup-retry-mysql-1 [10.218.153.20] 2026/05/05 12:32:41 PodIP: 10.218.153.20 2026/05/05 12:32:41 lookup demand-backup-retry-mysql-0.demand-backup-retry-mysql.kuttl-test-fine-meerkat [10.218.152.15] 2026/05/05 12:32:41 PrimaryIP: 10.218.152.15 2026/05/05 12:32:41 Donor: demand-backup-retry-mysql-0.demand-backup-retry-mysql.kuttl-test-fine-meerkat 2026/05/05 12:32:41 Opening connection to 10.218.153.20 2026/05/05 12:32:41 Clone required: true 2026/05/05 12:32:41 Checking if a clone in progress 2026/05/05 12:32:41 Clone in progress: false 2026/05/05 12:32:41 Cloning from demand-backup-retry-mysql-0.demand-backup-retry-mysql.kuttl-test-fine-meerkat 2026/05/05 12:33:09 Clone finished. Restarting container... kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:33:09 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:33:13 +0000 UTC Normal Pod demand-backup-retry-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 91ms (91ms including waiting). Image size: 434777704 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:33:33 +0000 UTC Warning Pod demand-backup-retry-haproxy-0.spec.containers{haproxy} Unhealthy Liveness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:33:46 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-retry-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:33:46 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-retry-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:33:46 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-retry-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-fine-meerkat/datadir-demand-backup-retry-mysql-2" pd.csi.storage.gke.io_gke-3538a24387d44849857c-0d6b-3c9e-vm_8d1a9da1-ab36-4958-8d96-3d73f6a3f5d8
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:33:50 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-retry-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-f0430fb4-d801-4bf9-ab50-6ffba58ea826 pd.csi.storage.gke.io_gke-3538a24387d44849857c-0d6b-3c9e-vm_8d1a9da1-ab36-4958-8d96-3d73f6a3f5d8
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:33:50 +0000 UTC Normal Pod demand-backup-retry-mysql-2 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/demand-backup-retry-mysql-2 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-mttt default-scheduler
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:33:54 +0000 UTC Normal Pod demand-backup-retry-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-f0430fb4-d801-4bf9-ab50-6ffba58ea826" attachdetach-controller
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:33:56 +0000 UTC Normal Pod demand-backup-retry-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/demand-backup-retry-haproxy-1 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-s5r2 default-scheduler
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:33:56 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:33:56 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 142ms (142ms including waiting). Image size: 124972222 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:33:56 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:33:56 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:33:58 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:00 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 1.932s (1.932s including waiting). Image size: 103552638 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:00 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:00 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:00 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:00 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 129ms (129ms including waiting). Image size: 103552638 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:00 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:00 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:00 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:00 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 159ms (159ms including waiting). Image size: 124972222 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:00 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:00 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:02 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:02 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 103ms (103ms including waiting). Image size: 434777704 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:02 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:02 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:02 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:17 +0000 UTC Normal Pod demand-backup-retry-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/demand-backup-retry-haproxy-2 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-vztk default-scheduler
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:17 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 15.304s (15.304s including waiting). Image size: 449692550 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:17 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:18 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:18 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 126ms (126ms including waiting). Image size: 124972222 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:18 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:18 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:18 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:18 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:19 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:21 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 1.941s (1.941s including waiting). Image size: 103552638 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:21 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:21 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:21 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:21 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 117ms (117ms including waiting). Image size: 103552638 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:21 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:21 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:23 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 5.724s (5.724s including waiting). Image size: 138673481 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:23 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:23 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:36 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 617ms (617ms including waiting). Image size: 138673481 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:48 +0000 UTC Warning Pod demand-backup-retry-mysql-2.spec.containers{pt-heartbeat} BackOff Back-off restarting failed container pt-heartbeat in pod demand-backup-retry-mysql-2_kuttl-test-fine-meerkat(a8c1d934-6372-4f0f-8fe1-cdf52439796a) kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:56 +0000 UTC Warning Pod demand-backup-retry-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2026/05/05 12:34:30 Waiting for MySQL ready state 2026/05/05 12:34:30 MySQL is ready 2026/05/05 12:34:30 Peers: [6130613737383363.demand-backup-retry-mysql-unready.kuttl-test-fine-meerkat 6230306238643730.demand-backup-retry-mysql-unready.kuttl-test-fine-meerkat 6330316432313637.demand-backup-retry-mysql-unready.kuttl-test-fine-meerkat] 2026/05/05 12:34:30 FQDN: demand-backup-retry-mysql-2.demand-backup-retry-mysql.kuttl-test-fine-meerkat 2026/05/05 12:34:30 Primary: demand-backup-retry-mysql-0.demand-backup-retry-mysql.kuttl-test-fine-meerkat Replicas: [demand-backup-retry-mysql-1.demand-backup-retry-mysql.kuttl-test-fine-meerkat demand-backup-retry-mysql-2.demand-backup-retry-mysql.kuttl-test-fine-meerkat] 2026/05/05 12:34:30 lookup demand-backup-retry-mysql-2 [10.218.154.29] 2026/05/05 12:34:30 PodIP: 10.218.154.29 2026/05/05 12:34:30 lookup demand-backup-retry-mysql-0.demand-backup-retry-mysql.kuttl-test-fine-meerkat [10.218.152.15] 2026/05/05 12:34:30 PrimaryIP: 10.218.152.15 2026/05/05 12:34:30 Donor: demand-backup-retry-mysql-1.demand-backup-retry-mysql.kuttl-test-fine-meerkat 2026/05/05 12:34:30 Opening connection to 10.218.154.29 2026/05/05 12:34:30 Clone required: true 2026/05/05 12:34:30 Checking if a clone in progress 2026/05/05 12:34:30 Clone in progress: false 2026/05/05 12:34:30 Cloning from demand-backup-retry-mysql-1.demand-backup-retry-mysql.kuttl-test-fine-meerkat 2026/05/05 12:34:56 Clone finished. Restarting container... kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:56 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:34:59 +0000 UTC Normal Pod demand-backup-retry-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 122ms (122ms including waiting). Image size: 434777704 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:45 +0000 UTC Normal Pod aws-cli-1777984544682236359 Binding Scheduled Successfully assigned kuttl-test-fine-meerkat/aws-cli-1777984544682236359 to gke-jen-ps-1238-7677a7b6-default-pool-28896d7f-mttt default-scheduler
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:45 +0000 UTC Normal Pod aws-cli-1777984544682236359.spec.containers{aws-cli-1777984544682236359} Pulling Pulling image "perconalab/awscli" kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:45 +0000 UTC Normal Pod aws-cli-1777984544682236359.spec.containers{aws-cli-1777984544682236359} Pulled Successfully pulled image "perconalab/awscli" in 94ms (94ms including waiting). Image size: 30314917 bytes. kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:45 +0000 UTC Normal Pod aws-cli-1777984544682236359.spec.containers{aws-cli-1777984544682236359} Created Created container: aws-cli-1777984544682236359 kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:45 +0000 UTC Normal Pod aws-cli-1777984544682236359.spec.containers{aws-cli-1777984544682236359} Started Started container aws-cli-1777984544682236359 kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:49 +0000 UTC Warning PodDisruptionBudget.policy demand-backup-retry-haproxy CalculateExpectedPodCountFailed Failed to calculate the number of expected pods: found no controllers for pod "demand-backup-retry-haproxy-0" controllermanager
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:50 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:50 +0000 UTC Normal Pod demand-backup-retry-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:50 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:50 +0000 UTC Normal Pod demand-backup-retry-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:50 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:50 +0000 UTC Normal Pod demand-backup-retry-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:50 +0000 UTC Warning PodDisruptionBudget.policy demand-backup-retry-haproxy CalculateExpectedPodCountFailed Failed to calculate the number of expected pods: found no controllers for pod "demand-backup-retry-haproxy-1" controllermanager
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:50 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:50 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:50 +0000 UTC Normal Pod demand-backup-retry-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:50 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:50 +0000 UTC Normal Pod demand-backup-retry-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:50 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:50 +0000 UTC Normal Pod demand-backup-retry-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:50 +0000 UTC Warning Pod demand-backup-retry-orc-1.spec.containers{orchestrator} Unhealthy Readiness probe failed: Get "http://10.218.153.19:3000/api/health": dial tcp 10.218.153.19:3000: connect: connection refused kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:50 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:50 +0000 UTC Normal Pod demand-backup-retry-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:51 +0000 UTC Warning Pod demand-backup-retry-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 12:35:51 MySQL state is not ready... kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:51 +0000 UTC Warning Pod demand-backup-retry-mysql-1.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 12:35:51 MySQL state is not ready... kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:51 +0000 UTC Warning Pod demand-backup-retry-orc-2.spec.containers{orchestrator} Unhealthy Readiness probe failed: Get "http://10.218.152.16:3000/api/health": dial tcp 10.218.152.16:3000: connect: connection refused kubelet
logger.go:42: 12:36:01 | demand-backup-retry | 2026-05-05 12:35:56 +0000 UTC Warning Pod demand-backup-retry-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 12:35:56 MySQL state is not ready... kubelet
logger.go:42: 12:36:01 | demand-backup-retry | Deleting namespace "kuttl-test-fine-meerkat"
=== NAME kuttl
    harness.go:404: run tests finished
    harness.go:511: cleaning up
    harness.go:568: removing temp folder: ""
--- PASS: kuttl (1066.48s)
    --- PASS: kuttl/harness (0.00s)
        --- PASS: kuttl/harness/demand-backup-retry (1065.78s)
PASS