=== RUN   kuttl
    harness.go:460: starting setup
    harness.go:258: running tests using configured kubeconfig.
    harness.go:281: Successful connection to cluster at: https://34.56.32.198
    harness.go:366: running tests
    harness.go:77: going to run test suite with timeout of 180 seconds for each step
    harness.go:378: testsuite: e2e-tests/tests has 46 tests
=== RUN   kuttl/harness
=== RUN   kuttl/harness/demand-backup-cloud
=== PAUSE kuttl/harness/demand-backup-cloud
=== CONT  kuttl/harness/demand-backup-cloud
    logger.go:42: 10:23:46 | demand-backup-cloud | Creating namespace "kuttl-test-optimal-kodiak"
    logger.go:42: 10:23:46 | demand-backup-cloud/0-minio-secret | starting test step 0-minio-secret
    logger.go:42: 10:23:47 | demand-backup-cloud/0-minio-secret | Secret:kuttl-test-optimal-kodiak/minio-secret created
    logger.go:42: 10:23:48 | demand-backup-cloud/0-minio-secret | test step completed 0-minio-secret
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | starting test step 1-deploy-operator
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | running command: [sh -c set -o errexit
        set -o xtrace

        source ../../functions

        init_temp_dir # do this only in the first TestStep

        apply_s3_storage_secrets
        deploy_operator
        deploy_tls_cluster_secrets
        deploy_client]
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | + source ../../functions
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ realpath ../../..
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | ++++ pwd
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/tests/demand-backup-cloud
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | ++ test_name=demand-backup-cloud
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/vars.sh
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/deploy
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/deploy
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/conf
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/conf
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup-cloud
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup-cloud
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export GIT_BRANCH=PR-1236
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ GIT_BRANCH=PR-1236
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export VERSION=PR-1236-862a05f9
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ VERSION=PR-1236-862a05f9
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1236-862a05f9
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1236-862a05f9
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ [[ -z 8.4 ]]
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export MYSQL_VERSION=8.4
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ MYSQL_VERSION=8.4
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export PMM_SERVER_VERSION=1.4.3
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ PMM_SERVER_VERSION=1.4.3
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export CERT_MANAGER_VER=1.19.1
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ CERT_MANAGER_VER=1.19.1
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export MINIO_VER=5.4.0
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ MINIO_VER=5.4.0
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export CHAOS_MESH_VER=2.7.2
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ CHAOS_MESH_VER=2.7.2
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ export VAULT_VER=0.16.1
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ VAULT_VER=0.16.1
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | ++++ which gdate
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | ++++ which date
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ date=/usr/sbin/date
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ oc get projects
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ :
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ kubectl get nodes
    logger.go:42: 10:23:48 | demand-backup-cloud/1-deploy-operator | +++ grep '^minikube'
    logger.go:42: 10:23:49 | demand-backup-cloud/1-deploy-operator | +++ which gsed
    logger.go:42: 10:23:49 | demand-backup-cloud/1-deploy-operator | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
    logger.go:42: 10:23:49 | demand-backup-cloud/1-deploy-operator | +++ which sed
    logger.go:42: 10:23:49 | demand-backup-cloud/1-deploy-operator | ++ sed=/usr/sbin/sed
    logger.go:42: 10:23:49 | demand-backup-cloud/1-deploy-operator | ++ oc get projects
    logger.go:42: 10:23:49 | demand-backup-cloud/1-deploy-operator | +++ kubectl version -o json
    logger.go:42: 10:23:49 | demand-backup-cloud/1-deploy-operator | +++ jq -r .serverVersion.gitVersion
    logger.go:42: 10:23:49 | demand-backup-cloud/1-deploy-operator | +++ grep '\-eks\-'
    logger.go:42: 10:23:49 | demand-backup-cloud/1-deploy-operator | grep: warning: stray \ before -
    logger.go:42: 10:23:49 | demand-backup-cloud/1-deploy-operator | Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
    logger.go:42: 10:23:50 | demand-backup-cloud/1-deploy-operator | ++ '[' ']'
    logger.go:42: 10:23:50 | demand-backup-cloud/1-deploy-operator | ++ EKS=0
    logger.go:42: 10:23:50 | demand-backup-cloud/1-deploy-operator | + init_temp_dir
    logger.go:42: 10:23:50 | demand-backup-cloud/1-deploy-operator | + rm -rf /tmp/kuttl/ps/demand-backup-cloud
    logger.go:42: 10:23:50 | demand-backup-cloud/1-deploy-operator | + mkdir -p /tmp/kuttl/ps/demand-backup-cloud
    logger.go:42: 10:23:50 | demand-backup-cloud/1-deploy-operator | + apply_s3_storage_secrets
    logger.go:42: 10:23:50 | demand-backup-cloud/1-deploy-operator | + apply_minio_secret
    logger.go:42: 10:23:50 | demand-backup-cloud/1-deploy-operator | + kubectl -n kuttl-test-optimal-kodiak apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/conf/minio-secret.yml
    logger.go:42: 10:23:51 | demand-backup-cloud/1-deploy-operator | Warning: resource secrets/minio-secret is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
    logger.go:42: 10:23:51 | demand-backup-cloud/1-deploy-operator | secret/minio-secret configured
    logger.go:42: 10:23:51 | demand-backup-cloud/1-deploy-operator | + kubectl -n kuttl-test-optimal-kodiak apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/conf/cloud-secret.yml
    logger.go:42: 10:23:52 | demand-backup-cloud/1-deploy-operator | secret/aws-s3-secret created
    logger.go:42: 10:23:53 | demand-backup-cloud/1-deploy-operator | secret/do-spaces-secret created
    logger.go:42: 10:23:53 | demand-backup-cloud/1-deploy-operator | secret/gcp-cs-secret created
    logger.go:42: 10:23:54 | demand-backup-cloud/1-deploy-operator | secret/azure-secret created
    logger.go:42: 10:23:54 | demand-backup-cloud/1-deploy-operator | + deploy_operator
    logger.go:42: 10:23:54 | demand-backup-cloud/1-deploy-operator | + destroy_operator
    logger.go:42: 10:23:54 | demand-backup-cloud/1-deploy-operator | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
    logger.go:42: 10:23:54 | demand-backup-cloud/1-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
    logger.go:42: 10:23:54 | demand-backup-cloud/1-deploy-operator | Error from server (NotFound): deployments.apps "percona-server-mysql-operator" not found
    logger.go:42: 10:23:54 | demand-backup-cloud/1-deploy-operator | + true
    logger.go:42: 10:23:54 | demand-backup-cloud/1-deploy-operator | + [[ -n ps-operator ]]
    logger.go:42: 10:23:54 | demand-backup-cloud/1-deploy-operator | + kubectl delete namespace ps-operator --force --grace-period=0
    logger.go:42: 10:23:54 | demand-backup-cloud/1-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
    logger.go:42: 10:23:55 | demand-backup-cloud/1-deploy-operator | Error from server (NotFound): namespaces "ps-operator" not found
    logger.go:42: 10:23:55 | demand-backup-cloud/1-deploy-operator | + true
    logger.go:42: 10:23:55 | demand-backup-cloud/1-deploy-operator | + [[ -n ps-operator ]]
    logger.go:42: 10:23:55 | demand-backup-cloud/1-deploy-operator | + create_namespace ps-operator
    logger.go:42: 10:23:55 | demand-backup-cloud/1-deploy-operator | + local namespace=ps-operator
    logger.go:42: 10:23:55 | demand-backup-cloud/1-deploy-operator | + [[ -n '' ]]
    logger.go:42: 10:23:55 | demand-backup-cloud/1-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found
    logger.go:42: 10:23:55 | demand-backup-cloud/1-deploy-operator | + kubectl wait --for=delete namespace ps-operator
    logger.go:42: 10:23:56 | demand-backup-cloud/1-deploy-operator | + kubectl create namespace ps-operator
    logger.go:42: 10:23:56 | demand-backup-cloud/1-deploy-operator | namespace/ps-operator created
    logger.go:42: 10:23:56 | demand-backup-cloud/1-deploy-operator | + apply_crd
    logger.go:42: 10:23:56 | demand-backup-cloud/1-deploy-operator | + kubectl -n ps-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1236/deploy/crd.yaml
    logger.go:42: 10:23:58 | demand-backup-cloud/1-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
    logger.go:42: 10:23:58 | demand-backup-cloud/1-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
    logger.go:42: 10:23:59 | demand-backup-cloud/1-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
    logger.go:42: 10:23:59 | demand-backup-cloud/1-deploy-operator | + apply_rbac
    logger.go:42: 10:23:59 | demand-backup-cloud/1-deploy-operator | + local rbac_file
    logger.go:42: 10:23:59 | demand-backup-cloud/1-deploy-operator | + '[' -n ps-operator ']'
    logger.go:42: 10:23:59 | demand-backup-cloud/1-deploy-operator | + rbac_file=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/deploy/cw-rbac.yaml
    logger.go:42: 10:23:59 | demand-backup-cloud/1-deploy-operator | + kubectl -n ps-operator apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1236/deploy/cw-rbac.yaml
    logger.go:42: 10:24:01 | demand-backup-cloud/1-deploy-operator | serviceaccount/percona-server-mysql-operator created
    logger.go:42: 10:24:01 | demand-backup-cloud/1-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
    logger.go:42: 10:24:02 | demand-backup-cloud/1-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator created
    logger.go:42: 10:24:02 | demand-backup-cloud/1-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
    logger.go:42: 10:24:02 | demand-backup-cloud/1-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator created
    logger.go:42: 10:24:02 | demand-backup-cloud/1-deploy-operator | + local operator_file
    logger.go:42: 10:24:02 | demand-backup-cloud/1-deploy-operator | + '[' -n ps-operator ']'
    logger.go:42: 10:24:02 | demand-backup-cloud/1-deploy-operator | + operator_file=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/deploy/cw-operator.yaml
    logger.go:42: 10:24:02 | demand-backup-cloud/1-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"'
    logger.go:42: 10:24:02 | demand-backup-cloud/1-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "VERBOSE"'
    logger.go:42: 10:24:02 | demand-backup-cloud/1-deploy-operator | + kubectl -n ps-operator apply -f -
    logger.go:42: 10:24:02 | demand-backup-cloud/1-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-1236-862a05f9
    logger.go:42: 10:24:02 | demand-backup-cloud/1-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-1236-862a05f9"' /mnt/jenkins/workspace/cloud-ps-operator_PR-1236/deploy/cw-operator.yaml
    logger.go:42: 10:24:04 | demand-backup-cloud/1-deploy-operator | configmap/percona-server-mysql-operator-config created
    logger.go:42: 10:24:05 | demand-backup-cloud/1-deploy-operator | deployment.apps/percona-server-mysql-operator created
    logger.go:42: 10:24:05 | demand-backup-cloud/1-deploy-operator | + deploy_tls_cluster_secrets
    logger.go:42: 10:24:05 | demand-backup-cloud/1-deploy-operator | + kubectl -n kuttl-test-optimal-kodiak apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/conf/ssl-secret.yaml
    logger.go:42: 10:24:06 | demand-backup-cloud/1-deploy-operator | secret/test-ssl created
    logger.go:42: 10:24:06 | demand-backup-cloud/1-deploy-operator | + deploy_client
    logger.go:42: 10:24:06 | demand-backup-cloud/1-deploy-operator | + kubectl -n kuttl-test-optimal-kodiak apply -f -
    logger.go:42: 10:24:06 | demand-backup-cloud/1-deploy-operator | ++ printf '.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:main-psmysql8.4
    logger.go:42: 10:24:06 | demand-backup-cloud/1-deploy-operator | + yq eval '.spec.containers[0].image="perconalab/percona-server-mysql-operator:main-psmysql8.4"' /mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/conf/client.yaml
    logger.go:42: 10:24:07 | demand-backup-cloud/1-deploy-operator | pod/mysql-client created
    logger.go:42: 10:24:08 | demand-backup-cloud/1-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 10:24:08 | demand-backup-cloud/1-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 10:24:09 | demand-backup-cloud/1-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 10:24:10 | demand-backup-cloud/1-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 10:24:10 | demand-backup-cloud/1-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 10:24:11 | demand-backup-cloud/1-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 10:24:12 | demand-backup-cloud/1-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 10:24:12 | demand-backup-cloud/1-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 10:24:13 | demand-backup-cloud/1-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 10:24:14 | demand-backup-cloud/1-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 10:24:14 | demand-backup-cloud/1-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 10:24:14 | demand-backup-cloud/1-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 10:24:16 | demand-backup-cloud/1-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 10:24:16 | demand-backup-cloud/1-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 10:24:16 | demand-backup-cloud/1-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 10:24:18 | demand-backup-cloud/1-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 10:24:18 | demand-backup-cloud/1-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 10:24:18 | demand-backup-cloud/1-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 10:24:20 | demand-backup-cloud/1-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 10:24:20 | demand-backup-cloud/1-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 10:24:20 | demand-backup-cloud/1-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 10:24:22 | demand-backup-cloud/1-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 10:24:22 | demand-backup-cloud/1-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 10:24:22 | demand-backup-cloud/1-deploy-operator | INFO Found 1 resource(s).
    logger.go:42: 10:24:22 | demand-backup-cloud/1-deploy-operator | NAME                           NAMESPACE    COL0
    logger.go:42: 10:24:22 | demand-backup-cloud/1-deploy-operator | percona-server-mysql-operator  ps-operator  1
    logger.go:42: 10:24:22 | demand-backup-cloud/1-deploy-operator | ASSERT PASS
    logger.go:42: 10:24:22 | demand-backup-cloud/1-deploy-operator | test step completed 1-deploy-operator
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | starting test step 2-create-cluster
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | running command: [sh -c set -o errexit
        set -o xtrace

        source ../../functions

        get_cr \
          | yq eval '.spec.mysql.clusterType="async"' - \
          | yq eval ".spec.mysql.size=3" - \
          | yq eval ".spec.proxy.haproxy.enabled=true" - \
          | yq eval ".spec.proxy.haproxy.size=3" - \
          | yq eval ".spec.orchestrator.enabled=true" - \
          | yq eval ".spec.orchestrator.size=3" - \
          | yq eval '.spec.backup.storages.aws-s3.type="s3"' - \
          | yq eval ".spec.backup.storages.aws-s3.verifyTLS=true" - \
          | yq eval '.spec.backup.storages.aws-s3.s3.bucket="operator-testing"' - \
          | yq eval '.spec.backup.storages.aws-s3.s3.credentialsSecret="aws-s3-secret"' - \
          | yq eval '.spec.backup.storages.aws-s3.s3.region="us-east-1"' - \
          | yq eval '.spec.backup.storages.aws-s3.s3.prefix="ps"' - \
          | yq eval '.spec.backup.storages.gcp-cs.type="gcs"' - \
          | yq eval ".spec.backup.storages.gcp-cs.verifyTLS=true" - \
          | yq eval '.spec.backup.storages.gcp-cs.gcs.bucket="operator-testing"' - \
          | yq eval '.spec.backup.storages.gcp-cs.gcs.credentialsSecret="gcp-cs-secret"' - \
          | yq eval '.spec.backup.storages.gcp-cs.gcs.endpointUrl="https://storage.googleapis.com"' - \
          | yq eval '.spec.backup.storages.gcp-cs.gcs.prefix="ps"' - \
          | yq eval '.spec.backup.storages.azure-blob.type="azure"' - \
          | yq eval ".spec.backup.storages.azure-blob.verifyTLS=true" - \
          | yq eval '.spec.backup.storages.azure-blob.azure.container="operator-testing"' - \
          | yq eval '.spec.backup.storages.azure-blob.azure.credentialsSecret="azure-secret"' - \
          | yq eval '.spec.backup.storages.azure-blob.azure.prefix="ps"' - \
          | kubectl -n "${NAMESPACE}" apply -f -]
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | + source ../../functions
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ realpath ../../..
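Editor's note: deploy_operator, as traced above, never edits deploy/cw-operator.yaml on disk. yq v4 rewrites document 1 of the manifest (the Deployment) in a pipeline whose output goes straight to kubectl, while the CRDs are applied separately with --server-side --force-conflicts, which sidesteps the last-applied-configuration annotation size limit a client-side apply of a large CRD can hit. A standalone sketch of the pipeline, reassembled from the trace (repo-relative paths; image tag from this run; not the harness's exact function):

    OPERATOR_NS=ps-operator
    IMAGE=perconalab/percona-server-mysql-operator:PR-1236-862a05f9

    # Patch the operator image, disable telemetry, raise the log level,
    # and apply the rewritten manifest without touching the file on disk.
    yq eval "select(documentIndex==1).spec.template.spec.containers[0].image=\"${IMAGE}\"" deploy/cw-operator.yaml \
        | yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' - \
        | yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "VERBOSE"' - \
        | kubectl -n "$OPERATOR_NS" apply -f -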
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | ++++ pwd
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/tests/demand-backup-cloud
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | ++ test_name=demand-backup-cloud
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/vars.sh
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/deploy
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/deploy
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/conf
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/conf
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup-cloud
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup-cloud
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export GIT_BRANCH=PR-1236
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ GIT_BRANCH=PR-1236
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export VERSION=PR-1236-862a05f9
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ VERSION=PR-1236-862a05f9
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1236-862a05f9
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1236-862a05f9
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ [[ -z 8.4 ]]
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export MYSQL_VERSION=8.4
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ MYSQL_VERSION=8.4
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export PMM_SERVER_VERSION=1.4.3
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ PMM_SERVER_VERSION=1.4.3
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export CERT_MANAGER_VER=1.19.1
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ CERT_MANAGER_VER=1.19.1
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export MINIO_VER=5.4.0
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ MINIO_VER=5.4.0
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export CHAOS_MESH_VER=2.7.2
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ CHAOS_MESH_VER=2.7.2
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ export VAULT_VER=0.16.1
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ VAULT_VER=0.16.1
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | ++++ which gdate
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | ++++ which date
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ date=/usr/sbin/date
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ oc get projects
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ :
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ kubectl get nodes
    logger.go:42: 10:24:22 | demand-backup-cloud/2-create-cluster | +++ grep '^minikube'
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | +++ which gsed
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | +++ which sed
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | ++ sed=/usr/sbin/sed
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | ++ oc get projects
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | +++ kubectl version -o json
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | +++ jq -r .serverVersion.gitVersion
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | +++ grep '\-eks\-'
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | grep: warning: stray \ before -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | ++ '[' ']'
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | ++ EKS=0
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + get_cr
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + local name_suffix=
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + local image_mysql=perconalab/percona-server-mysql-operator:main-psmysql8.4
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + local image_backup=perconalab/percona-server-mysql-operator:main-backup8.4
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + local image_orchestrator=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + local image_router=perconalab/percona-server-mysql-operator:main-router8.4
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + local image_toolkit=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + local image_haproxy=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + local image_pmm_client=perconalab/pmm-client:3-dev-latest
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + local cr_file=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/deploy/cr.yaml
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval .spec.mysql.size=3 -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval .spec.proxy.haproxy.enabled=true -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval .spec.proxy.haproxy.size=3 -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval .spec.orchestrator.enabled=true -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | ++ printf '.metadata.name="%s"' demand-backup-cloud
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | ++ printf '.spec.initContainer.image="%s"' perconalab/percona-server-mysql-operator:PR-1236-862a05f9
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.metadata.name="demand-backup-cloud"' /mnt/jenkins/workspace/cloud-ps-operator_PR-1236/deploy/cr.yaml
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval .spec.orchestrator.size=3 -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval 'del(.spec.secretsName)' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval .spec.orchestrator.enabled=true -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.initContainer.image="perconalab/percona-server-mysql-operator:PR-1236-862a05f9"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.backup.storages.aws-s3.type="s3"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval .spec.mysql.gracePeriod=30 -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval .spec.backup.storages.aws-s3.verifyTLS=true -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.backup.storages.aws-s3.s3.prefix="ps"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.backup.storages.gcp-cs.type="gcs"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.backup.storages.aws-s3.s3.region="us-east-1"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.backup.storages.aws-s3.s3.credentialsSecret="aws-s3-secret"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.backup.storages.aws-s3.s3.bucket="operator-testing"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval .spec.backup.storages.gcp-cs.verifyTLS=true -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql8.4
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.backup.storages.gcp-cs.gcs.bucket="operator-testing"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup8.4
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.backup.storages.gcp-cs.gcs.credentialsSecret="gcp-cs-secret"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router8.4
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql8.4"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.backup.storages.gcp-cs.gcs.endpointUrl="https://storage.googleapis.com"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + '[' -n '' ']'
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:3-dev-latest
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup8.4"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router8.4"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval .spec.backup.storages.azure-blob.verifyTLS=true -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.backup.storages.gcp-cs.gcs.prefix="ps"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.backup.storages.azure-blob.type="azure"' -
    logger.go:42: 10:24:23 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.backup.storages.azure-blob.azure.container="operator-testing"' -
    logger.go:42: 10:24:24 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:3-dev-latest"' -
    logger.go:42: 10:24:24 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.backup.storages.azure-blob.azure.prefix="ps"' -
    logger.go:42: 10:24:24 | demand-backup-cloud/2-create-cluster | + yq eval '.spec.backup.storages.azure-blob.azure.credentialsSecret="azure-secret"' -
    logger.go:42: 10:24:24 | demand-backup-cloud/2-create-cluster | + kubectl -n kuttl-test-optimal-kodiak apply -f -
    logger.go:42: 10:24:26 | demand-backup-cloud/2-create-cluster | perconaservermysql.ps.percona.com/demand-backup-cloud created
    logger.go:42: 10:28:51 | demand-backup-cloud/2-create-cluster | test step completed 2-create-cluster
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | starting test step 3-write-data
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | running command: [sh -c set -o errexit
        set -o xtrace

        source ../../functions

        run_mysql \
          "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" \
          "-h $(get_haproxy_svc $(get_cluster_name))"

        run_mysql \
          "INSERT myDB.myTable (id) VALUES (100500)" \
          "-h $(get_haproxy_svc $(get_cluster_name))"]
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | + source ../../functions
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ realpath ../../..
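Editor's note: step 2 composes the cluster CR by piping deploy/cr.yaml through one yq eval per field; xtrace echoes the pipeline stages as they start, which is why the '+ yq eval' lines above appear interleaved and out of order. The same edits can be grouped into a single yq v4 expression. A condensed sketch, assuming cr.yaml does not predefine these backup storages (so assigning each storage as a whole map is equivalent to the per-field edits; not the harness's get_cr):

    NAMESPACE=kuttl-test-optimal-kodiak

    # Grouped equivalent of the per-field pipeline traced above.
    yq eval '
      .metadata.name = "demand-backup-cloud" |
      .spec.mysql.clusterType = "async" | .spec.mysql.size = 3 |
      .spec.proxy.haproxy.enabled = true | .spec.proxy.haproxy.size = 3 |
      .spec.orchestrator.enabled = true | .spec.orchestrator.size = 3 |
      .spec.backup.storages.aws-s3 = {"type": "s3", "verifyTLS": true,
        "s3": {"bucket": "operator-testing", "credentialsSecret": "aws-s3-secret",
               "region": "us-east-1", "prefix": "ps"}} |
      .spec.backup.storages.gcp-cs = {"type": "gcs", "verifyTLS": true,
        "gcs": {"bucket": "operator-testing", "credentialsSecret": "gcp-cs-secret",
                "endpointUrl": "https://storage.googleapis.com", "prefix": "ps"}} |
      .spec.backup.storages.azure-blob = {"type": "azure", "verifyTLS": true,
        "azure": {"container": "operator-testing", "credentialsSecret": "azure-secret",
                  "prefix": "ps"}}
    ' deploy/cr.yaml | kubectl -n "$NAMESPACE" apply -f -

The gap between 10:24:26 (CR created) and 10:28:51 (step completed) is kuttl polling the step's assert conditions until the cluster reports ready.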
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | ++++ pwd
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/tests/demand-backup-cloud
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | ++ test_name=demand-backup-cloud
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/vars.sh
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/deploy
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/deploy
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/conf
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/conf
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup-cloud
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup-cloud
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export GIT_BRANCH=PR-1236
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ GIT_BRANCH=PR-1236
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export VERSION=PR-1236-862a05f9
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ VERSION=PR-1236-862a05f9
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1236-862a05f9
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1236-862a05f9
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ [[ -z 8.4 ]]
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export MYSQL_VERSION=8.4
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ MYSQL_VERSION=8.4
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
    logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
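Editor's note: step 3's script (echoed above) calls the harness's run_mysql helper, and the trace below expands what it does: resolve the cluster name, read the root password from the demand-backup-cloud-secrets Secret, and pipe the statement through the long-lived mysql-client pod. A rough standalone sketch of that pattern with simplified quoting (not the exact helper from e2e-tests/functions, which derives the secret and pod names via get_cluster_name/get_client_pod):

    run_mysql() {
        local command="$1"
        local host="$2"
        local pass
        # Root password lives base64-encoded under .data.root in the cluster secret.
        pass=$(kubectl -n "$NAMESPACE" get secret demand-backup-cloud-secrets \
            -o 'jsonpath={.data.root}' | base64 --decode)
        kubectl -n "$NAMESPACE" exec mysql-client -- \
            bash -c "printf '%s\n' '$command' | mysql -sN $host -uroot -p'$pass'" 2>&1 \
            | sed -e 's/mysql: //' \
            | grep -v 'Using a password on the command line interface can be insecure.' \
            || :    # grep exits non-zero when everything is filtered out; ':' keeps errexit happy

    }

    # Usage mirroring step 3:
    # run_mysql "INSERT myDB.myTable (id) VALUES (100500)" "-h demand-backup-cloud-haproxy"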
logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export CERT_MANAGER_VER=1.19.1 logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ CERT_MANAGER_VER=1.19.1 logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export MINIO_VER=5.4.0 logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ MINIO_VER=5.4.0 logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ export VAULT_VER=0.16.1 logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ VAULT_VER=0.16.1 logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | ++++ which gdate logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | ++++ which date logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ date=/usr/sbin/date logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ oc get projects logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ : logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ kubectl get nodes logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ grep '^minikube' logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ which gsed logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ which sed logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | ++ 
sed=/usr/sbin/sed logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | ++ oc get projects logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ kubectl version -o json logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ jq -r .serverVersion.gitVersion logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | +++ grep '\-eks\-' logger.go:42: 10:28:51 | demand-backup-cloud/3-write-data | grep: warning: stray \ before - logger.go:42: 10:28:52 | demand-backup-cloud/3-write-data | Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1 logger.go:42: 10:28:52 | demand-backup-cloud/3-write-data | ++ '[' ']' logger.go:42: 10:28:52 | demand-backup-cloud/3-write-data | ++ EKS=0 logger.go:42: 10:28:52 | demand-backup-cloud/3-write-data | +++ get_cluster_name logger.go:42: 10:28:52 | demand-backup-cloud/3-write-data | +++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 10:28:53 | demand-backup-cloud/3-write-data | ++ get_haproxy_svc demand-backup-cloud logger.go:42: 10:28:53 | demand-backup-cloud/3-write-data | ++ local cluster=demand-backup-cloud logger.go:42: 10:28:53 | demand-backup-cloud/3-write-data | ++ echo demand-backup-cloud-haproxy logger.go:42: 10:28:53 | demand-backup-cloud/3-write-data | + run_mysql 'CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' '-h demand-backup-cloud-haproxy' logger.go:42: 10:28:53 | demand-backup-cloud/3-write-data | + local 'command=CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' logger.go:42: 10:28:53 | demand-backup-cloud/3-write-data | + local 'host=-h demand-backup-cloud-haproxy' logger.go:42: 10:28:53 | demand-backup-cloud/3-write-data | ++ get_user_pass root logger.go:42: 10:28:53 | demand-backup-cloud/3-write-data | ++ local user=root logger.go:42: 10:28:53 | demand-backup-cloud/3-write-data | +++ get_cluster_name logger.go:42: 10:28:53 | demand-backup-cloud/3-write-data | +++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 10:28:53 | demand-backup-cloud/3-write-data | ++ local secret=demand-backup-cloud-secrets logger.go:42: 10:28:53 | demand-backup-cloud/3-write-data | ++ kubectl -n kuttl-test-optimal-kodiak get secret demand-backup-cloud-secrets -o 'jsonpath={.data.root}' logger.go:42: 10:28:53 | demand-backup-cloud/3-write-data | ++ base64 --decode logger.go:42: 10:28:54 | demand-backup-cloud/3-write-data | + local 'user=-uroot -p'\''Ehb-1ISwg9T[YQX0'\''' logger.go:42: 10:28:54 | demand-backup-cloud/3-write-data | + local pod= logger.go:42: 10:28:54 | demand-backup-cloud/3-write-data | ++ get_client_pod logger.go:42: 10:28:54 | demand-backup-cloud/3-write-data | ++ kubectl -n kuttl-test-optimal-kodiak get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 10:28:54 | demand-backup-cloud/3-write-data | + client_pod=mysql-client logger.go:42: 10:28:54 | demand-backup-cloud/3-write-data | + wait_pod mysql-client logger.go:42: 10:28:54 | demand-backup-cloud/3-write-data | + local pod=mysql-client logger.go:42: 10:28:54 | demand-backup-cloud/3-write-data | + local ns=kuttl-test-optimal-kodiak logger.go:42: 10:28:54 | demand-backup-cloud/3-write-data | + set +o xtrace logger.go:42: 10:28:55 | demand-backup-cloud/3-write-data | mysql-clienttrue logger.go:42: 10:28:55 | demand-backup-cloud/3-write-data | + kubectl -n kuttl-test-optimal-kodiak 
exec mysql-client -- bash -c 'printf '\''%s\n'\'' "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" | mysql -sN -h demand-backup-cloud-haproxy -uroot -p'\''Ehb-1ISwg9T[YQX0'\''' logger.go:42: 10:28:55 | demand-backup-cloud/3-write-data | + /usr/sbin/sed -e 's/mysql: //' logger.go:42: 10:28:55 | demand-backup-cloud/3-write-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 10:28:56 | demand-backup-cloud/3-write-data | + : logger.go:42: 10:28:56 | demand-backup-cloud/3-write-data | +++ get_cluster_name logger.go:42: 10:28:56 | demand-backup-cloud/3-write-data | +++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 10:28:56 | demand-backup-cloud/3-write-data | ++ get_haproxy_svc demand-backup-cloud logger.go:42: 10:28:56 | demand-backup-cloud/3-write-data | ++ local cluster=demand-backup-cloud logger.go:42: 10:28:56 | demand-backup-cloud/3-write-data | ++ echo demand-backup-cloud-haproxy logger.go:42: 10:28:56 | demand-backup-cloud/3-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100500)' '-h demand-backup-cloud-haproxy' logger.go:42: 10:28:56 | demand-backup-cloud/3-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100500)' logger.go:42: 10:28:56 | demand-backup-cloud/3-write-data | + local 'host=-h demand-backup-cloud-haproxy' logger.go:42: 10:28:56 | demand-backup-cloud/3-write-data | ++ get_user_pass root logger.go:42: 10:28:56 | demand-backup-cloud/3-write-data | ++ local user=root logger.go:42: 10:28:56 | demand-backup-cloud/3-write-data | +++ get_cluster_name logger.go:42: 10:28:56 | demand-backup-cloud/3-write-data | +++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 10:28:57 | demand-backup-cloud/3-write-data | ++ local secret=demand-backup-cloud-secrets logger.go:42: 10:28:57 | demand-backup-cloud/3-write-data | ++ kubectl -n kuttl-test-optimal-kodiak get secret demand-backup-cloud-secrets -o 'jsonpath={.data.root}' logger.go:42: 10:28:57 | demand-backup-cloud/3-write-data | ++ base64 --decode logger.go:42: 10:28:58 | demand-backup-cloud/3-write-data | + local 'user=-uroot -p'\''Ehb-1ISwg9T[YQX0'\''' logger.go:42: 10:28:58 | demand-backup-cloud/3-write-data | + local pod= logger.go:42: 10:28:58 | demand-backup-cloud/3-write-data | ++ get_client_pod logger.go:42: 10:28:58 | demand-backup-cloud/3-write-data | ++ kubectl -n kuttl-test-optimal-kodiak get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 10:28:58 | demand-backup-cloud/3-write-data | + client_pod=mysql-client logger.go:42: 10:28:58 | demand-backup-cloud/3-write-data | + wait_pod mysql-client logger.go:42: 10:28:58 | demand-backup-cloud/3-write-data | + local pod=mysql-client logger.go:42: 10:28:58 | demand-backup-cloud/3-write-data | + local ns=kuttl-test-optimal-kodiak logger.go:42: 10:28:58 | demand-backup-cloud/3-write-data | + set +o xtrace logger.go:42: 10:28:59 | demand-backup-cloud/3-write-data | mysql-clienttrue logger.go:42: 10:28:59 | demand-backup-cloud/3-write-data | + kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100500)" | mysql -sN -h demand-backup-cloud-haproxy -uroot -p'\''Ehb-1ISwg9T[YQX0'\''' logger.go:42: 10:28:59 | demand-backup-cloud/3-write-data | + /usr/sbin/sed -e 's/mysql: //' logger.go:42: 10:28:59 | demand-backup-cloud/3-write-data | + grep -v 'Using a password on the 
command line interface can be insecure.' logger.go:42: 10:29:00 | demand-backup-cloud/3-write-data | + : logger.go:42: 10:29:00 | demand-backup-cloud/3-write-data | test step completed 3-write-data logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | starting test step 4-move-primary-before-backup logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | running command: [sh -c set -o errexit set -o xtrace source ../../functions primary_pod_from_label="$(get_primary_from_label)" kubectl delete pod -n ${NAMESPACE} ${primary_pod_from_label} wait_cluster_consistency_async "${test_name}" "3" "3" new_primary_pod_from_label="$(get_primary_from_label)" if [ "${primary_pod_from_label}" == "${new_primary_pod_from_label}" ]; then echo "Old (${primary_pod_from_label}) and new (${new_primary_pod_from_label}) primary are the same (the failover didn't happen)!" exit 1 fi] logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | + source ../../functions logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ realpath ../../.. logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236 logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | ++++ pwd logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/tests/demand-backup-cloud logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | ++ test_name=demand-backup-cloud logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/vars.sh logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236 logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236 logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/deploy logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/deploy logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/conf logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/conf logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup-cloud logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup-cloud logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ export GIT_BRANCH=PR-1236 logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ 
logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | ++++ which gdate
logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | ++++ which date
logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ date=/usr/sbin/date
logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ oc get projects
logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ :
logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ kubectl get nodes
logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ grep '^minikube'
logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ which gsed
logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ which sed
logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | ++ sed=/usr/sbin/sed
logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | ++ oc get projects
logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ kubectl version -o json
logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ jq -r .serverVersion.gitVersion
logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | +++ grep '\-eks\-'
logger.go:42: 10:29:00 | demand-backup-cloud/4-move-primary-before-backup | grep: warning: stray \ before -
logger.go:42: 10:29:01 | demand-backup-cloud/4-move-primary-before-backup | Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
logger.go:42: 10:29:01 | demand-backup-cloud/4-move-primary-before-backup | ++ '[' ']'
logger.go:42: 10:29:01 | demand-backup-cloud/4-move-primary-before-backup | ++ EKS=0
logger.go:42: 10:29:01 | demand-backup-cloud/4-move-primary-before-backup | ++ get_primary_from_label
logger.go:42: 10:29:01 | demand-backup-cloud/4-move-primary-before-backup | ++ kubectl -n kuttl-test-optimal-kodiak get pods -l mysql.percona.com/primary=true '-ojsonpath={.items[0].metadata.name}'
logger.go:42: 10:29:01 | demand-backup-cloud/4-move-primary-before-backup | + primary_pod_from_label=demand-backup-cloud-mysql-0
logger.go:42: 10:29:01 | demand-backup-cloud/4-move-primary-before-backup | + kubectl delete pod -n kuttl-test-optimal-kodiak demand-backup-cloud-mysql-0
logger.go:42: 10:29:02 | demand-backup-cloud/4-move-primary-before-backup | pod "demand-backup-cloud-mysql-0" deleted from kuttl-test-optimal-kodiak namespace
logger.go:42: 10:29:22 | demand-backup-cloud/4-move-primary-before-backup | + wait_cluster_consistency_async demand-backup-cloud 3 3
logger.go:42: 10:29:22 | demand-backup-cloud/4-move-primary-before-backup | + local cluster_name=demand-backup-cloud
logger.go:42: 10:29:22 | demand-backup-cloud/4-move-primary-before-backup | + local cluster_size=3
logger.go:42: 10:29:22 | demand-backup-cloud/4-move-primary-before-backup | + local orc_size=3
logger.go:42: 10:29:22 | demand-backup-cloud/4-move-primary-before-backup | + '[' -z 3 ']'
logger.go:42: 10:29:22 | demand-backup-cloud/4-move-primary-before-backup | + sleep 7
logger.go:42: 10:29:29 | demand-backup-cloud/4-move-primary-before-backup | ++ kubectl get ps demand-backup-cloud -n kuttl-test-optimal-kodiak -o 'jsonpath={.status.mysql.state}'
logger.go:42: 10:29:30 | demand-backup-cloud/4-move-primary-before-backup | + [[ initializing == \r\e\a\d\y ]]
logger.go:42: 10:29:30 | demand-backup-cloud/4-move-primary-before-backup | + echo 'waiting for cluster readyness (async)'
logger.go:42: 10:29:30 | demand-backup-cloud/4-move-primary-before-backup | waiting for cluster readyness (async)
logger.go:42: 10:29:30 | demand-backup-cloud/4-move-primary-before-backup | + sleep 15
logger.go:42: 10:29:45 | demand-backup-cloud/4-move-primary-before-backup | ++ kubectl get ps demand-backup-cloud -n kuttl-test-optimal-kodiak -o 'jsonpath={.status.mysql.state}'
logger.go:42: 10:29:45 | demand-backup-cloud/4-move-primary-before-backup | + [[ initializing == \r\e\a\d\y ]]
logger.go:42: 10:29:45 | demand-backup-cloud/4-move-primary-before-backup | + echo 'waiting for cluster readyness (async)'
logger.go:42: 10:29:45 | demand-backup-cloud/4-move-primary-before-backup | waiting for cluster readyness (async)
logger.go:42: 10:29:45 | demand-backup-cloud/4-move-primary-before-backup | + sleep 15
logger.go:42: 10:30:00 | demand-backup-cloud/4-move-primary-before-backup | ++ kubectl get ps demand-backup-cloud -n kuttl-test-optimal-kodiak -o 'jsonpath={.status.mysql.state}'
logger.go:42: 10:30:01 | demand-backup-cloud/4-move-primary-before-backup | + [[ initializing == \r\e\a\d\y ]]
logger.go:42: 10:30:01 | demand-backup-cloud/4-move-primary-before-backup | + echo 'waiting for cluster readyness (async)'
logger.go:42: 10:30:01 | demand-backup-cloud/4-move-primary-before-backup | waiting for cluster readyness (async)
logger.go:42: 10:30:01 | demand-backup-cloud/4-move-primary-before-backup | + sleep 15
logger.go:42: 10:30:16 | demand-backup-cloud/4-move-primary-before-backup | ++ kubectl get ps demand-backup-cloud -n kuttl-test-optimal-kodiak -o 'jsonpath={.status.mysql.state}'
logger.go:42: 10:30:16 | demand-backup-cloud/4-move-primary-before-backup | + [[ initializing == \r\e\a\d\y ]]
logger.go:42: 10:30:16 | demand-backup-cloud/4-move-primary-before-backup | + echo 'waiting for cluster readyness (async)'
logger.go:42: 10:30:16 | demand-backup-cloud/4-move-primary-before-backup | waiting for cluster readyness (async)
logger.go:42: 10:30:16 | demand-backup-cloud/4-move-primary-before-backup | + sleep 15
logger.go:42: 10:30:31 | demand-backup-cloud/4-move-primary-before-backup | ++ kubectl get ps demand-backup-cloud -n kuttl-test-optimal-kodiak -o 'jsonpath={.status.mysql.state}'
logger.go:42: 10:30:32 | demand-backup-cloud/4-move-primary-before-backup | + [[ ready == \r\e\a\d\y ]]
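The polling visible above comes from wait_cluster_consistency_async. A sketch of the loop implied by the trace follows; the real helper is defined in e2e-tests/functions, and the replica/orchestrator follow-up checks appear in the trace right after this point.

# Sketch of the polling implied by the xtrace above; details may differ
# from the real wait_cluster_consistency_async in e2e-tests/functions.
wait_cluster_consistency_async() {
    local cluster_name=$1 cluster_size=$2 orc_size=$3
    sleep 7  # give the operator time to react to the deleted primary pod
    until [[ $(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" \
            -o 'jsonpath={.status.mysql.state}') == "ready" ]]; do
        echo 'waiting for cluster readyness (async)'
        sleep 15
    done
    # once MySQL reports ready, assert replica counts and orchestrator state
    [[ $(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o 'jsonpath={.status.mysql.ready}') == "${cluster_size}" ]]
    [[ $(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o 'jsonpath={.status.orchestrator.ready}') == "${orc_size}" ]]
    [[ $(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o 'jsonpath={.status.orchestrator.state}') == "ready" ]]
    [[ $(kubectl get ps "${cluster_name}" -n "${NAMESPACE}" -o 'jsonpath={.status.state}') == "ready" ]]
}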
logger.go:42: 10:30:32 | demand-backup-cloud/4-move-primary-before-backup | ++ kubectl get ps demand-backup-cloud -n kuttl-test-optimal-kodiak -o 'jsonpath={.status.mysql.ready}'
logger.go:42: 10:30:33 | demand-backup-cloud/4-move-primary-before-backup | + [[ 3 == \3 ]]
logger.go:42: 10:30:33 | demand-backup-cloud/4-move-primary-before-backup | ++ kubectl get ps demand-backup-cloud -n kuttl-test-optimal-kodiak -o 'jsonpath={.status.orchestrator.ready}'
logger.go:42: 10:30:33 | demand-backup-cloud/4-move-primary-before-backup | + [[ 3 == \3 ]]
logger.go:42: 10:30:33 | demand-backup-cloud/4-move-primary-before-backup | ++ kubectl get ps demand-backup-cloud -n kuttl-test-optimal-kodiak -o 'jsonpath={.status.orchestrator.state}'
logger.go:42: 10:30:34 | demand-backup-cloud/4-move-primary-before-backup | + [[ ready == \r\e\a\d\y ]]
logger.go:42: 10:30:34 | demand-backup-cloud/4-move-primary-before-backup | ++ kubectl get ps demand-backup-cloud -n kuttl-test-optimal-kodiak -o 'jsonpath={.status.state}'
logger.go:42: 10:30:35 | demand-backup-cloud/4-move-primary-before-backup | + [[ ready == \r\e\a\d\y ]]
logger.go:42: 10:30:35 | demand-backup-cloud/4-move-primary-before-backup | ++ get_primary_from_label
logger.go:42: 10:30:35 | demand-backup-cloud/4-move-primary-before-backup | ++ kubectl -n kuttl-test-optimal-kodiak get pods -l mysql.percona.com/primary=true '-ojsonpath={.items[0].metadata.name}'
logger.go:42: 10:30:35 | demand-backup-cloud/4-move-primary-before-backup | + new_primary_pod_from_label=demand-backup-cloud-mysql-2
logger.go:42: 10:30:35 | demand-backup-cloud/4-move-primary-before-backup | + '[' demand-backup-cloud-mysql-0 == demand-backup-cloud-mysql-2 ']'
logger.go:42: 10:30:35 | demand-backup-cloud/4-move-primary-before-backup | test step completed 4-move-primary-before-backup
logger.go:42: 10:30:35 | demand-backup-cloud/5-create-backup-s3 | starting test step 5-create-backup-s3
logger.go:42: 10:30:36 | demand-backup-cloud/5-create-backup-s3 | PerconaServerMySQLBackup:kuttl-test-optimal-kodiak/demand-backup-cloud-s3 created
logger.go:42: 10:30:49 | demand-backup-cloud/5-create-backup-s3 | test step completed 5-create-backup-s3
logger.go:42: 10:30:49 | demand-backup-cloud/6-delete-data | starting test step 6-delete-data
logger.go:42: 10:30:49 | demand-backup-cloud/6-delete-data | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions

run_mysql \
    "TRUNCATE TABLE myDB.myTable" \
    "-h $(get_haproxy_svc $(get_cluster_name))"

cluster_name=$(get_cluster_name)
for i in 0 1 2; do
    data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql")
    kubectl create configmap -n "${NAMESPACE}" 06-delete-data-s3-${i} --from-literal=data="${data}"
done]
logger.go:42: 10:30:49 | demand-backup-cloud/6-delete-data | + source ../../functions
[... xtrace of the vars.sh environment dump and tool detection, identical to step 4-move-primary-before-backup above ...]
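Step 5 above applies a PerconaServerMySQLBackup manifest from the test directory; the YAML itself is not echoed into this log. A plausible minimal equivalent is sketched below, assuming the operator's usual spec fields (clusterName, storageName); the storage name aws-s3 is hypothetical and would have to match a storage configured in the cluster CR.

# Hypothetical minimal manifest for the backup created in step 5; field
# names are assumed from the operator's CRD conventions, not from this log.
kubectl -n "${NAMESPACE}" apply -f - <<EOF
apiVersion: ps.percona.com/v1alpha1
kind: PerconaServerMySQLBackup
metadata:
  name: demand-backup-cloud-s3
spec:
  clusterName: demand-backup-cloud
  storageName: aws-s3
EOF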
logger.go:42: 10:30:50 | demand-backup-cloud/6-delete-data | +++ get_cluster_name
logger.go:42: 10:30:50 | demand-backup-cloud/6-delete-data | +++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 10:30:51 | demand-backup-cloud/6-delete-data | ++ get_haproxy_svc demand-backup-cloud
logger.go:42: 10:30:51 | demand-backup-cloud/6-delete-data | ++ local cluster=demand-backup-cloud
logger.go:42: 10:30:51 | demand-backup-cloud/6-delete-data | ++ echo demand-backup-cloud-haproxy
logger.go:42: 10:30:51 | demand-backup-cloud/6-delete-data | + run_mysql 'TRUNCATE TABLE myDB.myTable' '-h demand-backup-cloud-haproxy'
logger.go:42: 10:30:51 | demand-backup-cloud/6-delete-data | + local 'command=TRUNCATE TABLE myDB.myTable'
logger.go:42: 10:30:51 | demand-backup-cloud/6-delete-data | + local 'host=-h demand-backup-cloud-haproxy'
[... root credential lookup and mysql-client pod wait, identical to the run_mysql trace in step 3-write-data ...]
logger.go:42: 10:30:53 | demand-backup-cloud/6-delete-data | + kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "TRUNCATE TABLE myDB.myTable" | mysql -sN -h demand-backup-cloud-haproxy -uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:30:53 | demand-backup-cloud/6-delete-data | + /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 10:30:53 | demand-backup-cloud/6-delete-data | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 10:30:54 | demand-backup-cloud/6-delete-data | + :
logger.go:42: 10:30:54 | demand-backup-cloud/6-delete-data | ++ get_cluster_name
logger.go:42: 10:30:54 | demand-backup-cloud/6-delete-data | ++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 10:30:55 | demand-backup-cloud/6-delete-data | + cluster_name=demand-backup-cloud
logger.go:42: 10:30:55 | demand-backup-cloud/6-delete-data | + for i in 0 1 2
logger.go:42: 10:30:55 | demand-backup-cloud/6-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-cloud-mysql-0.demand-backup-cloud-mysql'
[... root credential lookup and mysql-client pod wait, identical to the run_mysql trace in step 3-write-data ...]
logger.go:42: 10:30:57 | demand-backup-cloud/6-delete-data | ++ kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-cloud-mysql-0.demand-backup-cloud-mysql -uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:30:57 | demand-backup-cloud/6-delete-data | ++ /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 10:30:57 | demand-backup-cloud/6-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 10:30:58 | demand-backup-cloud/6-delete-data | ++ :
logger.go:42: 10:30:58 | demand-backup-cloud/6-delete-data | + data=
logger.go:42: 10:30:58 | demand-backup-cloud/6-delete-data | + kubectl create configmap -n kuttl-test-optimal-kodiak 06-delete-data-s3-0 --from-literal=data=
logger.go:42: 10:30:59 | demand-backup-cloud/6-delete-data | configmap/06-delete-data-s3-0 created
logger.go:42: 10:30:59 | demand-backup-cloud/6-delete-data | + for i in 0 1 2
logger.go:42: 10:30:59 | demand-backup-cloud/6-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-cloud-mysql-1.demand-backup-cloud-mysql'
[... root credential lookup and mysql-client pod wait, identical to the run_mysql trace in step 3-write-data ...]
logger.go:42: 10:31:01 | demand-backup-cloud/6-delete-data | ++ kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-cloud-mysql-1.demand-backup-cloud-mysql -uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:31:01 | demand-backup-cloud/6-delete-data | ++ /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 10:31:01 | demand-backup-cloud/6-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 10:31:02 | demand-backup-cloud/6-delete-data | ++ :
logger.go:42: 10:31:02 | demand-backup-cloud/6-delete-data | + data=
logger.go:42: 10:31:02 | demand-backup-cloud/6-delete-data | + kubectl create configmap -n kuttl-test-optimal-kodiak 06-delete-data-s3-1 --from-literal=data=
logger.go:42: 10:31:02 | demand-backup-cloud/6-delete-data | configmap/06-delete-data-s3-1 created
logger.go:42: 10:31:02 | demand-backup-cloud/6-delete-data | + for i in 0 1 2
logger.go:42: 10:31:02 | demand-backup-cloud/6-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-cloud-mysql-2.demand-backup-cloud-mysql'
[... root credential lookup and mysql-client pod wait, identical to the run_mysql trace in step 3-write-data ...]
logger.go:42: 10:31:04 | demand-backup-cloud/6-delete-data | ++ kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-cloud-mysql-2.demand-backup-cloud-mysql -uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:31:04 | demand-backup-cloud/6-delete-data | ++ /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 10:31:04 | demand-backup-cloud/6-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 10:31:05 | demand-backup-cloud/6-delete-data | ++ :
logger.go:42: 10:31:05 | demand-backup-cloud/6-delete-data | + data=
logger.go:42: 10:31:05 | demand-backup-cloud/6-delete-data | + kubectl create configmap -n kuttl-test-optimal-kodiak 06-delete-data-s3-2 --from-literal=data=
logger.go:42: 10:31:06 | demand-backup-cloud/6-delete-data | configmap/06-delete-data-s3-2 created
logger.go:42: 10:31:07 | demand-backup-cloud/6-delete-data | test step completed 6-delete-data
logger.go:42: 10:31:07 | demand-backup-cloud/7-restore-from-s3 | starting test step 7-restore-from-s3
logger.go:42: 10:31:08 | demand-backup-cloud/7-restore-from-s3 | PerconaServerMySQLRestore:kuttl-test-optimal-kodiak/demand-backup-cloud-restore-s3 created
logger.go:42: 10:35:23 | demand-backup-cloud/7-restore-from-s3 | test step completed 7-restore-from-s3
logger.go:42: 10:35:23 | demand-backup-cloud/8-read-data | starting test step 8-read-data
logger.go:42: 10:35:23 | demand-backup-cloud/8-read-data | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions

cluster_name=$(get_cluster_name)
for i in 0 1 2; do
    data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql")
    kubectl create configmap -n "${NAMESPACE}" 08-read-data-s3-${i} --from-literal=data="${data}"
done]
logger.go:42: 10:35:23 | demand-backup-cloud/8-read-data | + source ../../functions
[... xtrace of the vars.sh environment dump and tool detection, identical to step 4-move-primary-before-backup above ...]
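Step 7's roughly four-minute wait (10:31:08 to 10:35:23) covers the PerconaServerMySQLRestore object reaching its terminal state. As with the backup, the manifest is not shown in this log; a minimal sketch under the same assumptions follows, where spec.backupName pointing at the step-5 backup is an assumed field name from the operator's CRD conventions.

# Hypothetical minimal manifest for the restore created in step 7.
kubectl -n "${NAMESPACE}" apply -f - <<EOF
apiVersion: ps.percona.com/v1alpha1
kind: PerconaServerMySQLRestore
metadata:
  name: demand-backup-cloud-restore-s3
spec:
  clusterName: demand-backup-cloud
  backupName: demand-backup-cloud-s3
EOF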
logger.go:42: 10:35:24 | demand-backup-cloud/8-read-data | ++ get_cluster_name
logger.go:42: 10:35:24 | demand-backup-cloud/8-read-data | ++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 10:35:25 | demand-backup-cloud/8-read-data | + cluster_name=demand-backup-cloud
logger.go:42: 10:35:25 | demand-backup-cloud/8-read-data | + for i in 0 1 2
logger.go:42: 10:35:25 | demand-backup-cloud/8-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-cloud-mysql-0.demand-backup-cloud-mysql'
[... root credential lookup and mysql-client pod wait, identical to the run_mysql trace in step 3-write-data ...]
logger.go:42: 10:35:27 | demand-backup-cloud/8-read-data | ++ kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-cloud-mysql-0.demand-backup-cloud-mysql -uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:35:27 | demand-backup-cloud/8-read-data | ++ /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 10:35:27 | demand-backup-cloud/8-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 10:35:28 | demand-backup-cloud/8-read-data | + data=100500
logger.go:42: 10:35:28 | demand-backup-cloud/8-read-data | + kubectl create configmap -n kuttl-test-optimal-kodiak 08-read-data-s3-0 --from-literal=data=100500
logger.go:42: 10:35:28 | demand-backup-cloud/8-read-data | configmap/08-read-data-s3-0 created
logger.go:42: 10:35:28 | demand-backup-cloud/8-read-data | + for i in 0 1 2
logger.go:42: 10:35:28 | demand-backup-cloud/8-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-cloud-mysql-1.demand-backup-cloud-mysql'
[... root credential lookup and mysql-client pod wait, identical to the run_mysql trace in step 3-write-data ...]
logger.go:42: 10:35:30 | demand-backup-cloud/8-read-data | ++ kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-cloud-mysql-1.demand-backup-cloud-mysql -uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:35:30 | demand-backup-cloud/8-read-data | ++ /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 10:35:30 | demand-backup-cloud/8-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 10:35:31 | demand-backup-cloud/8-read-data | + data=100500
logger.go:42: 10:35:31 | demand-backup-cloud/8-read-data | + kubectl create configmap -n kuttl-test-optimal-kodiak 08-read-data-s3-1 --from-literal=data=100500
logger.go:42: 10:35:32 | demand-backup-cloud/8-read-data | configmap/08-read-data-s3-1 created
logger.go:42: 10:35:32 | demand-backup-cloud/8-read-data | + for i in 0 1 2
logger.go:42: 10:35:32 | demand-backup-cloud/8-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-cloud-mysql-2.demand-backup-cloud-mysql'
[... root credential lookup and mysql-client pod wait, identical to the run_mysql trace in step 3-write-data ...]
logger.go:42: 10:35:34 | demand-backup-cloud/8-read-data | ++ kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-cloud-mysql-2.demand-backup-cloud-mysql -uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:35:34 | demand-backup-cloud/8-read-data | ++ /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 10:35:34 | demand-backup-cloud/8-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 10:35:35 | demand-backup-cloud/8-read-data | + data=100500
logger.go:42: 10:35:35 | demand-backup-cloud/8-read-data | + kubectl create configmap -n kuttl-test-optimal-kodiak 08-read-data-s3-2 --from-literal=data=100500
logger.go:42: 10:35:36 | demand-backup-cloud/8-read-data | configmap/08-read-data-s3-2 created
logger.go:42: 10:35:37 | demand-backup-cloud/8-read-data | test step completed 8-read-data
logger.go:42: 10:35:37 | demand-backup-cloud/9-create-backup-gcp | starting test step 9-create-backup-gcp
logger.go:42: 10:35:38 | demand-backup-cloud/9-create-backup-gcp | PerconaServerMySQLBackup:kuttl-test-optimal-kodiak/demand-backup-cloud-gcp created
logger.go:42: 10:35:51 | demand-backup-cloud/9-create-backup-gcp | test step completed 9-create-backup-gcp
logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | starting test step 10-delete-data
logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions

run_mysql \
    "TRUNCATE TABLE myDB.myTable" \
    "-h $(get_haproxy_svc $(get_cluster_name))"

cluster_name=$(get_cluster_name)
for i in 0 1 2; do
    data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql")
    kubectl create configmap -n "${NAMESPACE}" 10-delete-data-gcp-${i} --from-literal=data="${data}"
done]
logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | + source ../../functions
[... xtrace of the vars.sh environment dump and tool detection, identical to step 4-move-primary-before-backup above ...]
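kuttl confirms step 9's gcp backup through the step's assert file rather than in this trace; the step completes 13 seconds after the object is created. An equivalent manual check might look like the loop below, assuming the backup CR reports .status.state with a Succeeded terminal value, as the s3 backup in step 5 is asserted the same way.

# Assumed status field and terminal value; kuttl's own assert is authoritative.
until [[ $(kubectl -n "${NAMESPACE}" get perconaservermysqlbackup demand-backup-cloud-gcp \
        -o 'jsonpath={.status.state}') == "Succeeded" ]]; do
    sleep 5  # poll until the operator marks the backup finished
done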
IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ export CERT_MANAGER_VER=1.19.1 logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ CERT_MANAGER_VER=1.19.1 logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ export MINIO_VER=5.4.0 logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ MINIO_VER=5.4.0 logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ export VAULT_VER=0.16.1 logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ VAULT_VER=0.16.1 logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | ++++ which gdate logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | ++++ which date logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ date=/usr/sbin/date logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ oc get projects logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ : logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ kubectl get nodes logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ grep '^minikube' logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ which gsed logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) 
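
The which gdate / which gsed probes above are the harness locating GNU date and sed: on this Linux agent the plain binaries are already GNU, while on macOS the g-prefixed ones would be needed. A minimal sketch of that fallback, reconstructed from the trace (only the $date and $sed variable names appear in the log; the || framing is an assumption):

  # prefer the GNU-prefixed tools if installed, else fall back to the system ones;
  # the "which: no gdate ..." line on stderr above is this probe failing harmlessly
  date=$(which gdate || which date)
  sed=$(which gsed || which sed)

Later pipeline entries such as "/usr/sbin/sed -e 's/mysql: //'" are these resolved paths being used through $sed.
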
logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ which sed
logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | ++ sed=/usr/sbin/sed
logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | ++ oc get projects
logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ kubectl version -o json
logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ jq -r .serverVersion.gitVersion
logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | +++ grep '\-eks\-'
logger.go:42: 10:35:51 | demand-backup-cloud/10-delete-data | grep: warning: stray \ before -
logger.go:42: 10:35:52 | demand-backup-cloud/10-delete-data | Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
logger.go:42: 10:35:52 | demand-backup-cloud/10-delete-data | ++ '[' ']'
logger.go:42: 10:35:52 | demand-backup-cloud/10-delete-data | ++ EKS=0
logger.go:42: 10:35:52 | demand-backup-cloud/10-delete-data | +++ get_cluster_name
logger.go:42: 10:35:52 | demand-backup-cloud/10-delete-data | +++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 10:35:52 | demand-backup-cloud/10-delete-data | ++ get_haproxy_svc demand-backup-cloud
logger.go:42: 10:35:52 | demand-backup-cloud/10-delete-data | ++ local cluster=demand-backup-cloud
logger.go:42: 10:35:52 | demand-backup-cloud/10-delete-data | ++ echo demand-backup-cloud-haproxy
logger.go:42: 10:35:52 | demand-backup-cloud/10-delete-data | + run_mysql 'TRUNCATE TABLE myDB.myTable' '-h demand-backup-cloud-haproxy'
logger.go:42: 10:35:52 | demand-backup-cloud/10-delete-data | + local 'command=TRUNCATE TABLE myDB.myTable'
logger.go:42: 10:35:52 | demand-backup-cloud/10-delete-data | + local 'host=-h demand-backup-cloud-haproxy'
logger.go:42: 10:35:52 | demand-backup-cloud/10-delete-data | ++ get_user_pass root
logger.go:42: 10:35:52 | demand-backup-cloud/10-delete-data | ++ local user=root
logger.go:42: 10:35:52 | demand-backup-cloud/10-delete-data | +++ get_cluster_name
logger.go:42: 10:35:52 | demand-backup-cloud/10-delete-data | +++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 10:35:53 | demand-backup-cloud/10-delete-data | ++ local secret=demand-backup-cloud-secrets
logger.go:42: 10:35:53 | demand-backup-cloud/10-delete-data | ++ kubectl -n kuttl-test-optimal-kodiak get secret demand-backup-cloud-secrets -o 'jsonpath={.data.root}'
logger.go:42: 10:35:53 | demand-backup-cloud/10-delete-data | ++ base64 --decode
logger.go:42: 10:35:53 | demand-backup-cloud/10-delete-data | + local 'user=-uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:35:53 | demand-backup-cloud/10-delete-data | + local pod=
logger.go:42: 10:35:53 | demand-backup-cloud/10-delete-data | ++ get_client_pod
logger.go:42: 10:35:53 | demand-backup-cloud/10-delete-data | ++ kubectl -n kuttl-test-optimal-kodiak get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 10:35:54 | demand-backup-cloud/10-delete-data | + client_pod=mysql-client
logger.go:42: 10:35:54 | demand-backup-cloud/10-delete-data | + wait_pod mysql-client
logger.go:42: 10:35:54 | demand-backup-cloud/10-delete-data | + local pod=mysql-client
logger.go:42: 10:35:54 | demand-backup-cloud/10-delete-data | + local ns=kuttl-test-optimal-kodiak
logger.go:42: 10:35:54 | demand-backup-cloud/10-delete-data | + set +o xtrace
logger.go:42: 10:35:54 | demand-backup-cloud/10-delete-data | mysql-clienttrue
logger.go:42: 10:35:54 | demand-backup-cloud/10-delete-data | + kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "TRUNCATE TABLE myDB.myTable" | mysql -sN -h demand-backup-cloud-haproxy -uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:35:54 | demand-backup-cloud/10-delete-data | + /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 10:35:54 | demand-backup-cloud/10-delete-data | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 10:35:56 | demand-backup-cloud/10-delete-data | + :
logger.go:42: 10:35:56 | demand-backup-cloud/10-delete-data | ++ get_cluster_name
logger.go:42: 10:35:56 | demand-backup-cloud/10-delete-data | ++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 10:35:56 | demand-backup-cloud/10-delete-data | + cluster_name=demand-backup-cloud
logger.go:42: 10:35:56 | demand-backup-cloud/10-delete-data | + for i in 0 1 2
logger.go:42: 10:35:56 | demand-backup-cloud/10-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-cloud-mysql-0.demand-backup-cloud-mysql'
logger.go:42: 10:35:56 | demand-backup-cloud/10-delete-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 10:35:56 | demand-backup-cloud/10-delete-data | ++ local 'host=-h demand-backup-cloud-mysql-0.demand-backup-cloud-mysql'
logger.go:42: 10:35:56 | demand-backup-cloud/10-delete-data | +++ get_user_pass root
logger.go:42: 10:35:56 | demand-backup-cloud/10-delete-data | +++ local user=root
logger.go:42: 10:35:56 | demand-backup-cloud/10-delete-data | ++++ get_cluster_name
logger.go:42: 10:35:56 | demand-backup-cloud/10-delete-data | ++++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 10:35:57 | demand-backup-cloud/10-delete-data | +++ local secret=demand-backup-cloud-secrets
logger.go:42: 10:35:57 | demand-backup-cloud/10-delete-data | +++ kubectl -n kuttl-test-optimal-kodiak get secret demand-backup-cloud-secrets -o 'jsonpath={.data.root}'
logger.go:42: 10:35:57 | demand-backup-cloud/10-delete-data | +++ base64 --decode
logger.go:42: 10:35:57 | demand-backup-cloud/10-delete-data | ++ local 'user=-uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:35:57 | demand-backup-cloud/10-delete-data | ++ local pod=
logger.go:42: 10:35:57 | demand-backup-cloud/10-delete-data | +++ get_client_pod
logger.go:42: 10:35:57 | demand-backup-cloud/10-delete-data | +++ kubectl -n kuttl-test-optimal-kodiak get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 10:35:58 | demand-backup-cloud/10-delete-data | ++ client_pod=mysql-client
logger.go:42: 10:35:58 | demand-backup-cloud/10-delete-data | ++ wait_pod mysql-client
logger.go:42: 10:35:58 | demand-backup-cloud/10-delete-data | ++ local pod=mysql-client
logger.go:42: 10:35:58 | demand-backup-cloud/10-delete-data | ++ local ns=kuttl-test-optimal-kodiak
logger.go:42: 10:35:58 | demand-backup-cloud/10-delete-data | ++ set +o xtrace
logger.go:42: 10:35:58 | demand-backup-cloud/10-delete-data | mysql-clienttrue
logger.go:42: 10:35:58 | demand-backup-cloud/10-delete-data | ++ kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-cloud-mysql-0.demand-backup-cloud-mysql -uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:35:58 | demand-backup-cloud/10-delete-data | ++ /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 10:35:58 | demand-backup-cloud/10-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 10:35:59 | demand-backup-cloud/10-delete-data | ++ :
logger.go:42: 10:35:59 | demand-backup-cloud/10-delete-data | + data=
logger.go:42: 10:35:59 | demand-backup-cloud/10-delete-data | + kubectl create configmap -n kuttl-test-optimal-kodiak 10-delete-data-gcp-0 --from-literal=data=
logger.go:42: 10:36:00 | demand-backup-cloud/10-delete-data | configmap/10-delete-data-gcp-0 created
logger.go:42: 10:36:00 | demand-backup-cloud/10-delete-data | + for i in 0 1 2
logger.go:42: 10:36:00 | demand-backup-cloud/10-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-cloud-mysql-1.demand-backup-cloud-mysql'
logger.go:42: 10:36:00 | demand-backup-cloud/10-delete-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 10:36:00 | demand-backup-cloud/10-delete-data | ++ local 'host=-h demand-backup-cloud-mysql-1.demand-backup-cloud-mysql'
logger.go:42: 10:36:00 | demand-backup-cloud/10-delete-data | +++ get_user_pass root
logger.go:42: 10:36:00 | demand-backup-cloud/10-delete-data | +++ local user=root
logger.go:42: 10:36:00 | demand-backup-cloud/10-delete-data | ++++ get_cluster_name
logger.go:42: 10:36:00 | demand-backup-cloud/10-delete-data | ++++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 10:36:00 | demand-backup-cloud/10-delete-data | +++ local secret=demand-backup-cloud-secrets
logger.go:42: 10:36:00 | demand-backup-cloud/10-delete-data | +++ kubectl -n kuttl-test-optimal-kodiak get secret demand-backup-cloud-secrets -o 'jsonpath={.data.root}'
logger.go:42: 10:36:00 | demand-backup-cloud/10-delete-data | +++ base64 --decode
logger.go:42: 10:36:01 | demand-backup-cloud/10-delete-data | ++ local 'user=-uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:36:01 | demand-backup-cloud/10-delete-data | ++ local pod=
logger.go:42: 10:36:01 | demand-backup-cloud/10-delete-data | +++ get_client_pod
logger.go:42: 10:36:01 | demand-backup-cloud/10-delete-data | +++ kubectl -n kuttl-test-optimal-kodiak get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 10:36:01 | demand-backup-cloud/10-delete-data | ++ client_pod=mysql-client
logger.go:42: 10:36:01 | demand-backup-cloud/10-delete-data | ++ wait_pod mysql-client
logger.go:42: 10:36:01 | demand-backup-cloud/10-delete-data | ++ local pod=mysql-client
logger.go:42: 10:36:01 | demand-backup-cloud/10-delete-data | ++ local ns=kuttl-test-optimal-kodiak
logger.go:42: 10:36:01 | demand-backup-cloud/10-delete-data | ++ set +o xtrace
logger.go:42: 10:36:02 | demand-backup-cloud/10-delete-data | mysql-clienttrue
logger.go:42: 10:36:02 | demand-backup-cloud/10-delete-data | ++ kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-cloud-mysql-1.demand-backup-cloud-mysql -uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:36:02 | demand-backup-cloud/10-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 10:36:02 | demand-backup-cloud/10-delete-data | ++ /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 10:36:03 | demand-backup-cloud/10-delete-data | ++ :
logger.go:42: 10:36:03 | demand-backup-cloud/10-delete-data | + data=
logger.go:42: 10:36:03 | demand-backup-cloud/10-delete-data | + kubectl create configmap -n kuttl-test-optimal-kodiak 10-delete-data-gcp-1 --from-literal=data=
logger.go:42: 10:36:04 | demand-backup-cloud/10-delete-data | configmap/10-delete-data-gcp-1 created
logger.go:42: 10:36:04 | demand-backup-cloud/10-delete-data | + for i in 0 1 2
logger.go:42: 10:36:04 | demand-backup-cloud/10-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-cloud-mysql-2.demand-backup-cloud-mysql'
logger.go:42: 10:36:04 | demand-backup-cloud/10-delete-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 10:36:04 | demand-backup-cloud/10-delete-data | ++ local 'host=-h demand-backup-cloud-mysql-2.demand-backup-cloud-mysql'
logger.go:42: 10:36:04 | demand-backup-cloud/10-delete-data | +++ get_user_pass root
logger.go:42: 10:36:04 | demand-backup-cloud/10-delete-data | +++ local user=root
logger.go:42: 10:36:04 | demand-backup-cloud/10-delete-data | ++++ get_cluster_name
logger.go:42: 10:36:04 | demand-backup-cloud/10-delete-data | ++++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 10:36:04 | demand-backup-cloud/10-delete-data | +++ local secret=demand-backup-cloud-secrets
logger.go:42: 10:36:04 | demand-backup-cloud/10-delete-data | +++ kubectl -n kuttl-test-optimal-kodiak get secret demand-backup-cloud-secrets -o 'jsonpath={.data.root}'
logger.go:42: 10:36:04 | demand-backup-cloud/10-delete-data | +++ base64 --decode
logger.go:42: 10:36:05 | demand-backup-cloud/10-delete-data | ++ local 'user=-uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:36:05 | demand-backup-cloud/10-delete-data | ++ local pod=
logger.go:42: 10:36:05 | demand-backup-cloud/10-delete-data | +++ get_client_pod
logger.go:42: 10:36:05 | demand-backup-cloud/10-delete-data | +++ kubectl -n kuttl-test-optimal-kodiak get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 10:36:05 | demand-backup-cloud/10-delete-data | ++ client_pod=mysql-client
logger.go:42: 10:36:05 | demand-backup-cloud/10-delete-data | ++ wait_pod mysql-client
logger.go:42: 10:36:05 | demand-backup-cloud/10-delete-data | ++ local pod=mysql-client
logger.go:42: 10:36:05 | demand-backup-cloud/10-delete-data | ++ local ns=kuttl-test-optimal-kodiak
logger.go:42: 10:36:05 | demand-backup-cloud/10-delete-data | ++ set +o xtrace
logger.go:42: 10:36:06 | demand-backup-cloud/10-delete-data | mysql-clienttrue
logger.go:42: 10:36:06 | demand-backup-cloud/10-delete-data | ++ kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-cloud-mysql-2.demand-backup-cloud-mysql -uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:36:06 | demand-backup-cloud/10-delete-data | ++ /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 10:36:06 | demand-backup-cloud/10-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
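
The kubectl exec / sed / grep pipeline repeated throughout this trace is the run_mysql helper at work. A sketch reconstructed from the xtrace output above (the real function lives in e2e-tests/functions and also waits for the client pod via wait_pod; details may differ):

  run_mysql() {
    local command="$1"
    local host="$2"
    local user
    user=$(get_user_pass root)   # yields "-uroot -p'<decoded password>'", see the secret-lookup sketch below
    local pod
    pod=$(kubectl -n "${NAMESPACE}" get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}')
    # feed the statement into a one-shot mysql client in the helper pod, then
    # strip the password warning so callers get only the bare result rows
    kubectl -n "${NAMESPACE}" exec "$pod" -- bash -c "printf '%s\n' \"$command\" | mysql -sN $host $user" \
      | $sed -e 's/mysql: //' \
      | grep -v 'Using a password on the command line interface can be insecure.' \
      || :   # the lone ':' entries in the trace suggest empty result sets are tolerated under errexit
  }

This is why the empty SELECT after TRUNCATE produces "++ :" followed by "+ data=" with no value.
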
logger.go:42: 10:36:07 | demand-backup-cloud/10-delete-data | ++ :
logger.go:42: 10:36:07 | demand-backup-cloud/10-delete-data | + data=
logger.go:42: 10:36:07 | demand-backup-cloud/10-delete-data | + kubectl create configmap -n kuttl-test-optimal-kodiak 10-delete-data-gcp-2 --from-literal=data=
logger.go:42: 10:36:07 | demand-backup-cloud/10-delete-data | configmap/10-delete-data-gcp-2 created
logger.go:42: 10:36:08 | demand-backup-cloud/10-delete-data | test step completed 10-delete-data
logger.go:42: 10:36:08 | demand-backup-cloud/11-restore-from-gcp | starting test step 11-restore-from-gcp
logger.go:42: 10:36:09 | demand-backup-cloud/11-restore-from-gcp | PerconaServerMySQLRestore:kuttl-test-optimal-kodiak/demand-backup-cloud-restore-gcp created
logger.go:42: 10:40:15 | demand-backup-cloud/11-restore-from-gcp | test step completed 11-restore-from-gcp
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | starting test step 12-read-data
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions cluster_name=$(get_cluster_name) for i in 0 1 2; do data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql") kubectl create configmap -n "${NAMESPACE}" 12-read-data-gcp-${i} --from-literal=data="${data}" done]
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | + source ../../functions
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ realpath ../../..
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | ++++ pwd
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/tests/demand-backup-cloud
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | ++ test_name=demand-backup-cloud
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/vars.sh
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/deploy
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/deploy
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/conf
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/conf
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup-cloud
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup-cloud
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export GIT_BRANCH=PR-1236
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ GIT_BRANCH=PR-1236
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export VERSION=PR-1236-862a05f9
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ VERSION=PR-1236-862a05f9
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1236-862a05f9
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1236-862a05f9
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ [[ -z 8.4 ]]
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export MYSQL_VERSION=8.4
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ MYSQL_VERSION=8.4
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export CERT_MANAGER_VER=1.19.1
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ CERT_MANAGER_VER=1.19.1
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export MINIO_VER=5.4.0
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ MINIO_VER=5.4.0
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ export VAULT_VER=0.16.1
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ VAULT_VER=0.16.1
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | ++++ which gdate
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | ++++ which date
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ date=/usr/sbin/date
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ oc get projects
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ :
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ kubectl get nodes
logger.go:42: 10:40:15 | demand-backup-cloud/12-read-data | +++ grep '^minikube'
logger.go:42: 10:40:16 | demand-backup-cloud/12-read-data | +++ which gsed
logger.go:42: 10:40:16 | demand-backup-cloud/12-read-data | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 10:40:16 | demand-backup-cloud/12-read-data | +++ which sed
logger.go:42: 10:40:16 | demand-backup-cloud/12-read-data | ++ sed=/usr/sbin/sed
logger.go:42: 10:40:16 | demand-backup-cloud/12-read-data | ++ oc get projects
logger.go:42: 10:40:16 | demand-backup-cloud/12-read-data | +++ kubectl version -o json
logger.go:42: 10:40:16 | demand-backup-cloud/12-read-data | +++ jq -r .serverVersion.gitVersion
logger.go:42: 10:40:16 | demand-backup-cloud/12-read-data | +++ grep '\-eks\-'
logger.go:42: 10:40:16 | demand-backup-cloud/12-read-data | grep: warning: stray \ before -
logger.go:42: 10:40:16 | demand-backup-cloud/12-read-data | Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
logger.go:42: 10:40:16 | demand-backup-cloud/12-read-data | ++ '[' ']'
logger.go:42: 10:40:16 | demand-backup-cloud/12-read-data | ++ EKS=0
logger.go:42: 10:40:16 | demand-backup-cloud/12-read-data | ++ get_cluster_name
logger.go:42: 10:40:16 | demand-backup-cloud/12-read-data | ++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 10:40:17 | demand-backup-cloud/12-read-data | + cluster_name=demand-backup-cloud
logger.go:42: 10:40:17 | demand-backup-cloud/12-read-data | + for i in 0 1 2
logger.go:42: 10:40:17 | demand-backup-cloud/12-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-cloud-mysql-0.demand-backup-cloud-mysql'
logger.go:42: 10:40:17 | demand-backup-cloud/12-read-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 10:40:17 | demand-backup-cloud/12-read-data | ++ local 'host=-h demand-backup-cloud-mysql-0.demand-backup-cloud-mysql'
logger.go:42: 10:40:17 | demand-backup-cloud/12-read-data | +++ get_user_pass root
logger.go:42: 10:40:17 | demand-backup-cloud/12-read-data | +++ local user=root
logger.go:42: 10:40:17 | demand-backup-cloud/12-read-data | ++++ get_cluster_name
logger.go:42: 10:40:17 | demand-backup-cloud/12-read-data | ++++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 10:40:17 | demand-backup-cloud/12-read-data | +++ local secret=demand-backup-cloud-secrets
logger.go:42: 10:40:17 | demand-backup-cloud/12-read-data | +++ kubectl -n kuttl-test-optimal-kodiak get secret demand-backup-cloud-secrets -o 'jsonpath={.data.root}'
logger.go:42: 10:40:17 | demand-backup-cloud/12-read-data | +++ base64 --decode
logger.go:42: 10:40:18 | demand-backup-cloud/12-read-data | ++ local 'user=-uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:40:18 | demand-backup-cloud/12-read-data | ++ local pod=
logger.go:42: 10:40:18 | demand-backup-cloud/12-read-data | +++ get_client_pod
logger.go:42: 10:40:18 | demand-backup-cloud/12-read-data | +++ kubectl -n kuttl-test-optimal-kodiak get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 10:40:18 | demand-backup-cloud/12-read-data | ++ client_pod=mysql-client
logger.go:42: 10:40:18 | demand-backup-cloud/12-read-data | ++ wait_pod mysql-client
logger.go:42: 10:40:18 | demand-backup-cloud/12-read-data | ++ local pod=mysql-client
logger.go:42: 10:40:18 | demand-backup-cloud/12-read-data | ++ local ns=kuttl-test-optimal-kodiak
logger.go:42: 10:40:18 | demand-backup-cloud/12-read-data | ++ set +o xtrace
logger.go:42: 10:40:19 | demand-backup-cloud/12-read-data | mysql-clienttrue
logger.go:42: 10:40:19 | demand-backup-cloud/12-read-data | ++ kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-cloud-mysql-0.demand-backup-cloud-mysql -uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:40:19 | demand-backup-cloud/12-read-data | ++ /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 10:40:19 | demand-backup-cloud/12-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 10:40:20 | demand-backup-cloud/12-read-data | + data=100500
logger.go:42: 10:40:20 | demand-backup-cloud/12-read-data | + kubectl create configmap -n kuttl-test-optimal-kodiak 12-read-data-gcp-0 --from-literal=data=100500
logger.go:42: 10:40:21 | demand-backup-cloud/12-read-data | configmap/12-read-data-gcp-0 created
logger.go:42: 10:40:21 | demand-backup-cloud/12-read-data | + for i in 0 1 2
logger.go:42: 10:40:21 | demand-backup-cloud/12-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-cloud-mysql-1.demand-backup-cloud-mysql'
logger.go:42: 10:40:21 | demand-backup-cloud/12-read-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 10:40:21 | demand-backup-cloud/12-read-data | ++ local 'host=-h demand-backup-cloud-mysql-1.demand-backup-cloud-mysql'
logger.go:42: 10:40:21 | demand-backup-cloud/12-read-data | +++ get_user_pass root
logger.go:42: 10:40:21 | demand-backup-cloud/12-read-data | +++ local user=root
logger.go:42: 10:40:21 | demand-backup-cloud/12-read-data | ++++ get_cluster_name
logger.go:42: 10:40:21 | demand-backup-cloud/12-read-data | ++++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 10:40:21 | demand-backup-cloud/12-read-data | +++ local secret=demand-backup-cloud-secrets
logger.go:42: 10:40:21 | demand-backup-cloud/12-read-data | +++ kubectl -n kuttl-test-optimal-kodiak get secret demand-backup-cloud-secrets -o 'jsonpath={.data.root}'
logger.go:42: 10:40:21 | demand-backup-cloud/12-read-data | +++ base64 --decode
logger.go:42: 10:40:22 | demand-backup-cloud/12-read-data | ++ local 'user=-uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:40:22 | demand-backup-cloud/12-read-data | ++ local pod=
logger.go:42: 10:40:22 | demand-backup-cloud/12-read-data | +++ get_client_pod
logger.go:42: 10:40:22 | demand-backup-cloud/12-read-data | +++ kubectl -n kuttl-test-optimal-kodiak get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 10:40:22 | demand-backup-cloud/12-read-data | ++ client_pod=mysql-client
logger.go:42: 10:40:22 | demand-backup-cloud/12-read-data | ++ wait_pod mysql-client
logger.go:42: 10:40:22 | demand-backup-cloud/12-read-data | ++ local pod=mysql-client
logger.go:42: 10:40:22 | demand-backup-cloud/12-read-data | ++ local ns=kuttl-test-optimal-kodiak
logger.go:42: 10:40:22 | demand-backup-cloud/12-read-data | ++ set +o xtrace
logger.go:42: 10:40:23 | demand-backup-cloud/12-read-data | mysql-clienttrue
logger.go:42: 10:40:23 | demand-backup-cloud/12-read-data | ++ kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-cloud-mysql-1.demand-backup-cloud-mysql -uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:40:23 | demand-backup-cloud/12-read-data | ++ /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 10:40:23 | demand-backup-cloud/12-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 10:40:24 | demand-backup-cloud/12-read-data | + data=100500
logger.go:42: 10:40:24 | demand-backup-cloud/12-read-data | + kubectl create configmap -n kuttl-test-optimal-kodiak 12-read-data-gcp-1 --from-literal=data=100500
logger.go:42: 10:40:24 | demand-backup-cloud/12-read-data | configmap/12-read-data-gcp-1 created
logger.go:42: 10:40:24 | demand-backup-cloud/12-read-data | + for i in 0 1 2
logger.go:42: 10:40:24 | demand-backup-cloud/12-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-cloud-mysql-2.demand-backup-cloud-mysql'
logger.go:42: 10:40:24 | demand-backup-cloud/12-read-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 10:40:24 | demand-backup-cloud/12-read-data | ++ local 'host=-h demand-backup-cloud-mysql-2.demand-backup-cloud-mysql'
logger.go:42: 10:40:24 | demand-backup-cloud/12-read-data | +++ get_user_pass root
logger.go:42: 10:40:24 | demand-backup-cloud/12-read-data | +++ local user=root
logger.go:42: 10:40:24 | demand-backup-cloud/12-read-data | ++++ get_cluster_name
logger.go:42: 10:40:24 | demand-backup-cloud/12-read-data | ++++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 10:40:25 | demand-backup-cloud/12-read-data | +++ local secret=demand-backup-cloud-secrets
logger.go:42: 10:40:25 | demand-backup-cloud/12-read-data | +++ base64 --decode
logger.go:42: 10:40:25 | demand-backup-cloud/12-read-data | +++ kubectl -n kuttl-test-optimal-kodiak get secret demand-backup-cloud-secrets -o 'jsonpath={.data.root}'
logger.go:42: 10:40:26 | demand-backup-cloud/12-read-data | ++ local 'user=-uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:40:26 | demand-backup-cloud/12-read-data | ++ local pod=
logger.go:42: 10:40:26 | demand-backup-cloud/12-read-data | +++ get_client_pod
logger.go:42: 10:40:26 | demand-backup-cloud/12-read-data | +++ kubectl -n kuttl-test-optimal-kodiak get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 10:40:26 | demand-backup-cloud/12-read-data | ++ client_pod=mysql-client
logger.go:42: 10:40:26 | demand-backup-cloud/12-read-data | ++ wait_pod mysql-client
logger.go:42: 10:40:26 | demand-backup-cloud/12-read-data | ++ local pod=mysql-client
logger.go:42: 10:40:26 | demand-backup-cloud/12-read-data | ++ local ns=kuttl-test-optimal-kodiak
logger.go:42: 10:40:26 | demand-backup-cloud/12-read-data | ++ set +o xtrace
logger.go:42: 10:40:27 | demand-backup-cloud/12-read-data | mysql-clienttrue
logger.go:42: 10:40:27 | demand-backup-cloud/12-read-data | ++ kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-cloud-mysql-2.demand-backup-cloud-mysql -uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:40:27 | demand-backup-cloud/12-read-data | ++ /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 10:40:27 | demand-backup-cloud/12-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
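
The 12-read-data-gcp-N configmaps created in this step are how the test asserts the GCP restore worked: kuttl compares each one against the step's assert file and only marks the step complete when they match. The file name and exact contents below follow kuttl's NN-assert.yaml convention and are an assumption, not taken from this log:

  # hypothetical e2e-tests/tests/demand-backup-cloud/12-assert.yaml fragment
  cat <<'EOF' > 12-assert.yaml
  apiVersion: v1
  kind: ConfigMap
  metadata:
    name: 12-read-data-gcp-0
  data:
    data: "100500"    # the value written in step 2 must survive the restore
  EOF

An empty data value here, as recorded by the delete-data steps, would instead prove the truncate took effect before the restore.
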
logger.go:42: 10:40:28 | demand-backup-cloud/12-read-data | + data=100500
logger.go:42: 10:40:28 | demand-backup-cloud/12-read-data | + kubectl create configmap -n kuttl-test-optimal-kodiak 12-read-data-gcp-2 --from-literal=data=100500
logger.go:42: 10:40:28 | demand-backup-cloud/12-read-data | configmap/12-read-data-gcp-2 created
logger.go:42: 10:40:29 | demand-backup-cloud/12-read-data | test step completed 12-read-data
logger.go:42: 10:40:29 | demand-backup-cloud/13-create-backup-azure | starting test step 13-create-backup-azure
logger.go:42: 10:40:30 | demand-backup-cloud/13-create-backup-azure | PerconaServerMySQLBackup:kuttl-test-optimal-kodiak/demand-backup-cloud-azure created
logger.go:42: 10:40:43 | demand-backup-cloud/13-create-backup-azure | test step completed 13-create-backup-azure
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | starting test step 14-delete-data
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_mysql \ "TRUNCATE TABLE myDB.myTable" \ "-h $(get_haproxy_svc $(get_cluster_name))" cluster_name=$(get_cluster_name) for i in 0 1 2; do data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql") kubectl create configmap -n "${NAMESPACE}" 14-delete-data-azure-${i} --from-literal=data="${data}" done]
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | + source ../../functions
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ realpath ../../..
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | ++++ pwd
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/tests/demand-backup-cloud
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | ++ test_name=demand-backup-cloud
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/vars.sh
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/deploy
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/deploy
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/conf
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/e2e-tests/conf
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup-cloud
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup-cloud
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export GIT_BRANCH=PR-1236
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ GIT_BRANCH=PR-1236
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export VERSION=PR-1236-862a05f9
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ VERSION=PR-1236-862a05f9
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1236-862a05f9
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1236-862a05f9
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ [[ -z 8.4 ]]
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export MYSQL_VERSION=8.4
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ MYSQL_VERSION=8.4
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export CERT_MANAGER_VER=1.19.1
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ CERT_MANAGER_VER=1.19.1
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export MINIO_VER=5.4.0
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ MINIO_VER=5.4.0
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ export VAULT_VER=0.16.1
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ VAULT_VER=0.16.1
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | ++++ which gdate
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | ++++ which date
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ date=/usr/sbin/date
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ oc get projects
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ :
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ kubectl get nodes
logger.go:42: 10:40:43 | demand-backup-cloud/14-delete-data | +++ grep '^minikube'
logger.go:42: 10:40:44 | demand-backup-cloud/14-delete-data | +++ which gsed
logger.go:42: 10:40:44 | demand-backup-cloud/14-delete-data | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1236/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 10:40:44 | demand-backup-cloud/14-delete-data | +++ which sed
logger.go:42: 10:40:44 | demand-backup-cloud/14-delete-data | ++ sed=/usr/sbin/sed
logger.go:42: 10:40:44 | demand-backup-cloud/14-delete-data | ++ oc get projects
logger.go:42: 10:40:44 | demand-backup-cloud/14-delete-data | +++ kubectl version -o json
logger.go:42: 10:40:44 | demand-backup-cloud/14-delete-data | +++ jq -r .serverVersion.gitVersion
logger.go:42: 10:40:44 | demand-backup-cloud/14-delete-data | +++ grep '\-eks\-'
logger.go:42: 10:40:44 | demand-backup-cloud/14-delete-data | grep: warning: stray \ before -
logger.go:42: 10:40:44 | demand-backup-cloud/14-delete-data | Warning: version difference between client (1.35) and server (1.32) exceeds the supported minor version skew of +/-1
logger.go:42: 10:40:44 | demand-backup-cloud/14-delete-data | ++ '[' ']'
logger.go:42: 10:40:44 | demand-backup-cloud/14-delete-data | ++ EKS=0
logger.go:42: 10:40:44 | demand-backup-cloud/14-delete-data | +++ get_cluster_name
logger.go:42: 10:40:44 | demand-backup-cloud/14-delete-data | +++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 10:40:45 | demand-backup-cloud/14-delete-data | ++ get_haproxy_svc demand-backup-cloud
logger.go:42: 10:40:45 | demand-backup-cloud/14-delete-data | ++ local cluster=demand-backup-cloud
logger.go:42: 10:40:45 | demand-backup-cloud/14-delete-data | ++ echo demand-backup-cloud-haproxy
logger.go:42: 10:40:45 | demand-backup-cloud/14-delete-data | + run_mysql 'TRUNCATE TABLE myDB.myTable' '-h demand-backup-cloud-haproxy'
logger.go:42: 10:40:45 | demand-backup-cloud/14-delete-data | + local 'command=TRUNCATE TABLE myDB.myTable'
logger.go:42: 10:40:45 | demand-backup-cloud/14-delete-data | + local 'host=-h demand-backup-cloud-haproxy'
logger.go:42: 10:40:45 | demand-backup-cloud/14-delete-data | ++ get_user_pass root
logger.go:42: 10:40:45 | demand-backup-cloud/14-delete-data | ++ local user=root
logger.go:42: 10:40:45 | demand-backup-cloud/14-delete-data | +++ get_cluster_name
logger.go:42: 10:40:45 | demand-backup-cloud/14-delete-data | +++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 10:40:45 | demand-backup-cloud/14-delete-data | ++ local secret=demand-backup-cloud-secrets
logger.go:42: 10:40:45 | demand-backup-cloud/14-delete-data | ++ kubectl -n kuttl-test-optimal-kodiak get secret demand-backup-cloud-secrets -o 'jsonpath={.data.root}'
logger.go:42: 10:40:45 | demand-backup-cloud/14-delete-data | ++ base64 --decode
logger.go:42: 10:40:46 | demand-backup-cloud/14-delete-data | + local 'user=-uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:40:46 | demand-backup-cloud/14-delete-data | + local pod=
logger.go:42: 10:40:46 | demand-backup-cloud/14-delete-data | ++ get_client_pod
logger.go:42: 10:40:46 | demand-backup-cloud/14-delete-data | ++ kubectl -n kuttl-test-optimal-kodiak get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 10:40:46 | demand-backup-cloud/14-delete-data | + client_pod=mysql-client
logger.go:42: 10:40:46 | demand-backup-cloud/14-delete-data | + wait_pod mysql-client
logger.go:42: 10:40:46 | demand-backup-cloud/14-delete-data | + local pod=mysql-client
logger.go:42: 10:40:46 | demand-backup-cloud/14-delete-data | + local ns=kuttl-test-optimal-kodiak
logger.go:42: 10:40:46 | demand-backup-cloud/14-delete-data | + set +o xtrace
logger.go:42: 10:40:47 | demand-backup-cloud/14-delete-data | mysql-clienttrue
logger.go:42: 10:40:47 | demand-backup-cloud/14-delete-data | + kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "TRUNCATE TABLE myDB.myTable" | mysql -sN -h demand-backup-cloud-haproxy -uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:40:47 | demand-backup-cloud/14-delete-data | + /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 10:40:47 | demand-backup-cloud/14-delete-data | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 10:40:48 | demand-backup-cloud/14-delete-data | + :
logger.go:42: 10:40:48 | demand-backup-cloud/14-delete-data | ++ get_cluster_name
logger.go:42: 10:40:48 | demand-backup-cloud/14-delete-data | ++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 10:40:49 | demand-backup-cloud/14-delete-data | + cluster_name=demand-backup-cloud
logger.go:42: 10:40:49 | demand-backup-cloud/14-delete-data | + for i in 0 1 2
logger.go:42: 10:40:49 | demand-backup-cloud/14-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-cloud-mysql-0.demand-backup-cloud-mysql'
logger.go:42: 10:40:49 | demand-backup-cloud/14-delete-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 10:40:49 | demand-backup-cloud/14-delete-data | ++ local 'host=-h demand-backup-cloud-mysql-0.demand-backup-cloud-mysql'
logger.go:42: 10:40:49 | demand-backup-cloud/14-delete-data | +++ get_user_pass root
logger.go:42: 10:40:49 | demand-backup-cloud/14-delete-data | +++ local user=root
logger.go:42: 10:40:49 | demand-backup-cloud/14-delete-data | ++++ get_cluster_name
logger.go:42: 10:40:49 | demand-backup-cloud/14-delete-data | ++++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 10:40:49 | demand-backup-cloud/14-delete-data | +++ local secret=demand-backup-cloud-secrets
logger.go:42: 10:40:49 | demand-backup-cloud/14-delete-data | +++ kubectl -n kuttl-test-optimal-kodiak get secret demand-backup-cloud-secrets -o 'jsonpath={.data.root}'
logger.go:42: 10:40:49 | demand-backup-cloud/14-delete-data | +++ base64 --decode
logger.go:42: 10:40:50 | demand-backup-cloud/14-delete-data | ++ local 'user=-uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:40:50 | demand-backup-cloud/14-delete-data | ++ local pod=
logger.go:42: 10:40:50 | demand-backup-cloud/14-delete-data | +++ get_client_pod
logger.go:42: 10:40:50 | demand-backup-cloud/14-delete-data | +++ kubectl -n kuttl-test-optimal-kodiak get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 10:40:50 | demand-backup-cloud/14-delete-data | ++ client_pod=mysql-client
logger.go:42: 10:40:50 | demand-backup-cloud/14-delete-data | ++ wait_pod mysql-client
logger.go:42: 10:40:50 | demand-backup-cloud/14-delete-data | ++ local pod=mysql-client
logger.go:42: 10:40:50 | demand-backup-cloud/14-delete-data | ++ local ns=kuttl-test-optimal-kodiak
logger.go:42: 10:40:50 | demand-backup-cloud/14-delete-data | ++ set +o xtrace
logger.go:42: 10:40:51 | demand-backup-cloud/14-delete-data | mysql-clienttrue
logger.go:42: 10:40:51 | demand-backup-cloud/14-delete-data | ++ kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-cloud-mysql-0.demand-backup-cloud-mysql -uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:40:51 | demand-backup-cloud/14-delete-data | ++ /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 10:40:51 | demand-backup-cloud/14-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
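
The get_user_pass root / get secret / base64 --decode sequence traced repeatedly above resolves the root password from the cluster's secrets object. A sketch reconstructed from those xtrace lines (the function body is an assumption; the secret name derivation, jsonpath, and output format match the trace):

  get_user_pass() {
    local user="$1"
    local secret="$(get_cluster_name)-secrets"   # e.g. demand-backup-cloud-secrets
    local pass
    # passwords are stored base64-encoded under .data.<user> in the secret
    pass=$(kubectl -n "${NAMESPACE}" get secret "$secret" -o "jsonpath={.data.$user}" | base64 --decode)
    echo "-u$user -p'$pass'"                     # ready-to-splice mysql CLI flags
  }

The decoded value is what appears in the trace as user=-uroot -p'Ehb-1ISwg9T[YQX0'.
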
logger.go:42: 10:40:52 | demand-backup-cloud/14-delete-data | ++ :
logger.go:42: 10:40:52 | demand-backup-cloud/14-delete-data | + data=
logger.go:42: 10:40:52 | demand-backup-cloud/14-delete-data | + kubectl create configmap -n kuttl-test-optimal-kodiak 14-delete-data-azure-0 --from-literal=data=
logger.go:42: 10:40:52 | demand-backup-cloud/14-delete-data | configmap/14-delete-data-azure-0 created
logger.go:42: 10:40:52 | demand-backup-cloud/14-delete-data | + for i in 0 1 2
logger.go:42: 10:40:52 | demand-backup-cloud/14-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-cloud-mysql-1.demand-backup-cloud-mysql'
logger.go:42: 10:40:52 | demand-backup-cloud/14-delete-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 10:40:52 | demand-backup-cloud/14-delete-data | ++ local 'host=-h demand-backup-cloud-mysql-1.demand-backup-cloud-mysql'
logger.go:42: 10:40:52 | demand-backup-cloud/14-delete-data | +++ get_user_pass root
logger.go:42: 10:40:52 | demand-backup-cloud/14-delete-data | +++ local user=root
logger.go:42: 10:40:52 | demand-backup-cloud/14-delete-data | ++++ get_cluster_name
logger.go:42: 10:40:52 | demand-backup-cloud/14-delete-data | ++++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 10:40:53 | demand-backup-cloud/14-delete-data | +++ local secret=demand-backup-cloud-secrets
logger.go:42: 10:40:53 | demand-backup-cloud/14-delete-data | +++ kubectl -n kuttl-test-optimal-kodiak get secret demand-backup-cloud-secrets -o 'jsonpath={.data.root}'
logger.go:42: 10:40:53 | demand-backup-cloud/14-delete-data | +++ base64 --decode
logger.go:42: 10:40:54 | demand-backup-cloud/14-delete-data | ++ local 'user=-uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:40:54 | demand-backup-cloud/14-delete-data | ++ local pod=
logger.go:42: 10:40:54 | demand-backup-cloud/14-delete-data | +++ get_client_pod
logger.go:42: 10:40:54 | demand-backup-cloud/14-delete-data | +++ kubectl -n kuttl-test-optimal-kodiak get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 10:40:54 | demand-backup-cloud/14-delete-data | ++ client_pod=mysql-client
logger.go:42: 10:40:54 | demand-backup-cloud/14-delete-data | ++ wait_pod mysql-client
logger.go:42: 10:40:54 | demand-backup-cloud/14-delete-data | ++ local pod=mysql-client
logger.go:42: 10:40:54 | demand-backup-cloud/14-delete-data | ++ local ns=kuttl-test-optimal-kodiak
logger.go:42: 10:40:54 | demand-backup-cloud/14-delete-data | ++ set +o xtrace
logger.go:42: 10:40:55 | demand-backup-cloud/14-delete-data | mysql-clienttrue
logger.go:42: 10:40:55 | demand-backup-cloud/14-delete-data | ++ kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-cloud-mysql-1.demand-backup-cloud-mysql -uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:40:55 | demand-backup-cloud/14-delete-data | ++ /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 10:40:55 | demand-backup-cloud/14-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 10:40:56 | demand-backup-cloud/14-delete-data | ++ :
logger.go:42: 10:40:56 | demand-backup-cloud/14-delete-data | + data=
logger.go:42: 10:40:56 | demand-backup-cloud/14-delete-data | + kubectl create configmap -n kuttl-test-optimal-kodiak 14-delete-data-azure-1 --from-literal=data=
logger.go:42: 10:40:56 | demand-backup-cloud/14-delete-data | configmap/14-delete-data-azure-1 created
logger.go:42: 10:40:56 | demand-backup-cloud/14-delete-data | + for i in 0 1 2
logger.go:42: 10:40:56 | demand-backup-cloud/14-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-cloud-mysql-2.demand-backup-cloud-mysql'
logger.go:42: 10:40:56 | demand-backup-cloud/14-delete-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 10:40:56 | demand-backup-cloud/14-delete-data | ++ local 'host=-h demand-backup-cloud-mysql-2.demand-backup-cloud-mysql'
logger.go:42: 10:40:56 | demand-backup-cloud/14-delete-data | +++ get_user_pass root
logger.go:42: 10:40:56 | demand-backup-cloud/14-delete-data | +++ local user=root
logger.go:42: 10:40:56 | demand-backup-cloud/14-delete-data | ++++ get_cluster_name
logger.go:42: 10:40:56 | demand-backup-cloud/14-delete-data | ++++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 10:40:57 | demand-backup-cloud/14-delete-data | +++ local secret=demand-backup-cloud-secrets
logger.go:42: 10:40:57 | demand-backup-cloud/14-delete-data | +++ kubectl -n kuttl-test-optimal-kodiak get secret demand-backup-cloud-secrets -o 'jsonpath={.data.root}'
logger.go:42: 10:40:57 | demand-backup-cloud/14-delete-data | +++ base64 --decode
logger.go:42: 10:40:57 | demand-backup-cloud/14-delete-data | ++ local 'user=-uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:40:57 | demand-backup-cloud/14-delete-data | ++ local pod=
logger.go:42: 10:40:57 | demand-backup-cloud/14-delete-data | +++ get_client_pod
logger.go:42: 10:40:57 | demand-backup-cloud/14-delete-data | +++ kubectl -n kuttl-test-optimal-kodiak get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 10:40:58 | demand-backup-cloud/14-delete-data | ++ client_pod=mysql-client
logger.go:42: 10:40:58 | demand-backup-cloud/14-delete-data | ++ wait_pod mysql-client
logger.go:42: 10:40:58 | demand-backup-cloud/14-delete-data | ++ local pod=mysql-client
logger.go:42: 10:40:58 | demand-backup-cloud/14-delete-data | ++ local ns=kuttl-test-optimal-kodiak
logger.go:42: 10:40:58 | demand-backup-cloud/14-delete-data | ++ set +o xtrace
logger.go:42: 10:40:58 | demand-backup-cloud/14-delete-data | mysql-clienttrue
logger.go:42: 10:40:58 | demand-backup-cloud/14-delete-data | ++ kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-cloud-mysql-2.demand-backup-cloud-mysql -uroot -p'\''Ehb-1ISwg9T[YQX0'\'''
logger.go:42: 10:40:58 | demand-backup-cloud/14-delete-data | ++ /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 10:40:58 | demand-backup-cloud/14-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
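
The restore step that follows (15-restore-from-azure) works by creating a PerconaServerMySQLRestore resource and letting the operator rebuild the cluster from the named backup, which is why it takes roughly four minutes while the backup and delete steps finish in seconds. A minimal sketch of such a manifest; the apiVersion and spec field names are assumptions based on the operator's published examples (see deploy/ in the repo), not taken from this log:

  cat <<'EOF' | kubectl -n kuttl-test-optimal-kodiak apply -f -
  apiVersion: ps.percona.com/v1alpha1          # assumed API group/version
  kind: PerconaServerMySQLRestore
  metadata:
    name: demand-backup-cloud-restore-azure
  spec:
    clusterName: demand-backup-cloud           # assumed field names; the operator
    backupName: demand-backup-cloud-azure      # resolves storage from the backup CR
  EOF

kuttl then watches the restore object's status and only reports "test step completed" once it reaches a successful state.
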
logger.go:42: 10:40:59 | demand-backup-cloud/14-delete-data | ++ : logger.go:42: 10:40:59 | demand-backup-cloud/14-delete-data | + data= logger.go:42: 10:40:59 | demand-backup-cloud/14-delete-data | + kubectl create configmap -n kuttl-test-optimal-kodiak 14-delete-data-azure-2 --from-literal=data= logger.go:42: 10:41:00 | demand-backup-cloud/14-delete-data | configmap/14-delete-data-azure-2 created logger.go:42: 10:41:01 | demand-backup-cloud/14-delete-data | test step completed 14-delete-data logger.go:42: 10:41:01 | demand-backup-cloud/15-restore-from-azure | starting test step 15-restore-from-azure logger.go:42: 10:41:02 | demand-backup-cloud/15-restore-from-azure | PerconaServerMySQLRestore:kuttl-test-optimal-kodiak/demand-backup-cloud-restore-azure created logger.go:42: 10:45:05 | demand-backup-cloud/15-restore-from-azure | test step completed 15-restore-from-azure logger.go:42: 10:45:05 | demand-backup-cloud/16-read-data | starting test step 16-read-data logger.go:42: 10:45:05 | demand-backup-cloud/16-read-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions cluster_name=$(get_cluster_name) for i in 0 1 2; do data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql") kubectl create configmap -n "${NAMESPACE}" 16-read-data-azure-${i} --from-literal=data="${data}" done] logger.go:42: 10:45:05 | demand-backup-cloud/16-read-data | + source ../../functions
[... xtrace of sourcing ../../functions and vars.sh omitted — same variable exports, gdate/gsed detection, and kubectl version-skew warning as the earlier test steps ...]
logger.go:42: 10:45:06 | demand-backup-cloud/16-read-data | ++ get_cluster_name logger.go:42: 10:45:06 | demand-backup-cloud/16-read-data | ++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 10:45:07 | demand-backup-cloud/16-read-data | + cluster_name=demand-backup-cloud logger.go:42: 10:45:07 | demand-backup-cloud/16-read-data | + for i in 0 1 2 logger.go:42: 10:45:07 | demand-backup-cloud/16-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-cloud-mysql-0.demand-backup-cloud-mysql' logger.go:42: 10:45:07 | demand-backup-cloud/16-read-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 10:45:07 | demand-backup-cloud/16-read-data | ++ local 'host=-h demand-backup-cloud-mysql-0.demand-backup-cloud-mysql' logger.go:42: 10:45:07 | demand-backup-cloud/16-read-data | +++ get_user_pass root logger.go:42: 10:45:07 | demand-backup-cloud/16-read-data | +++ local user=root logger.go:42: 10:45:07 | demand-backup-cloud/16-read-data | ++++ get_cluster_name logger.go:42: 10:45:07 | demand-backup-cloud/16-read-data | ++++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 10:45:07 | 
demand-backup-cloud/16-read-data | +++ local secret=demand-backup-cloud-secrets logger.go:42: 10:45:07 | demand-backup-cloud/16-read-data | +++ base64 --decode logger.go:42: 10:45:07 | demand-backup-cloud/16-read-data | +++ kubectl -n kuttl-test-optimal-kodiak get secret demand-backup-cloud-secrets -o 'jsonpath={.data.root}' logger.go:42: 10:45:08 | demand-backup-cloud/16-read-data | ++ local 'user=-uroot -p'\''Ehb-1ISwg9T[YQX0'\''' logger.go:42: 10:45:08 | demand-backup-cloud/16-read-data | ++ local pod= logger.go:42: 10:45:08 | demand-backup-cloud/16-read-data | +++ get_client_pod logger.go:42: 10:45:08 | demand-backup-cloud/16-read-data | +++ kubectl -n kuttl-test-optimal-kodiak get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 10:45:08 | demand-backup-cloud/16-read-data | ++ client_pod=mysql-client logger.go:42: 10:45:08 | demand-backup-cloud/16-read-data | ++ wait_pod mysql-client logger.go:42: 10:45:08 | demand-backup-cloud/16-read-data | ++ local pod=mysql-client logger.go:42: 10:45:08 | demand-backup-cloud/16-read-data | ++ local ns=kuttl-test-optimal-kodiak logger.go:42: 10:45:08 | demand-backup-cloud/16-read-data | ++ set +o xtrace logger.go:42: 10:45:09 | demand-backup-cloud/16-read-data | mysql-clienttrue logger.go:42: 10:45:09 | demand-backup-cloud/16-read-data | ++ kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-cloud-mysql-0.demand-backup-cloud-mysql -uroot -p'\''Ehb-1ISwg9T[YQX0'\''' logger.go:42: 10:45:09 | demand-backup-cloud/16-read-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 10:45:09 | demand-backup-cloud/16-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
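The nested get_user_pass/get_cluster_name calls above show how the harness derives credentials instead of hard-coding them: the cluster name is discovered from the ps custom resource, and the password is base64-decoded out of the matching secret. A minimal equivalent, assuming the "<cluster>-secrets" naming visible in this log:

    # Minimal equivalent of get_user_pass as traced above (assumes the
    # "<cluster>-secrets" secret naming seen in this log).
    get_user_pass() {
        local user="$1"
        local cluster
        cluster=$(kubectl -n "${NAMESPACE}" get ps -o 'jsonpath={.items[0].metadata.name}')
        kubectl -n "${NAMESPACE}" get secret "${cluster}-secrets" \
            -o "jsonpath={.data.${user}}" | base64 --decode
    }
    # Usage: pass=$(get_user_pass root)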
logger.go:42: 10:45:10 | demand-backup-cloud/16-read-data | + data=100500 logger.go:42: 10:45:10 | demand-backup-cloud/16-read-data | + kubectl create configmap -n kuttl-test-optimal-kodiak 16-read-data-azure-0 --from-literal=data=100500 logger.go:42: 10:45:10 | demand-backup-cloud/16-read-data | configmap/16-read-data-azure-0 created logger.go:42: 10:45:10 | demand-backup-cloud/16-read-data | + for i in 0 1 2 logger.go:42: 10:45:10 | demand-backup-cloud/16-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-cloud-mysql-1.demand-backup-cloud-mysql' logger.go:42: 10:45:10 | demand-backup-cloud/16-read-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 10:45:10 | demand-backup-cloud/16-read-data | ++ local 'host=-h demand-backup-cloud-mysql-1.demand-backup-cloud-mysql' logger.go:42: 10:45:10 | demand-backup-cloud/16-read-data | +++ get_user_pass root logger.go:42: 10:45:10 | demand-backup-cloud/16-read-data | +++ local user=root logger.go:42: 10:45:10 | demand-backup-cloud/16-read-data | ++++ get_cluster_name logger.go:42: 10:45:10 | demand-backup-cloud/16-read-data | ++++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 10:45:11 | demand-backup-cloud/16-read-data | +++ local secret=demand-backup-cloud-secrets logger.go:42: 10:45:11 | demand-backup-cloud/16-read-data | +++ kubectl -n kuttl-test-optimal-kodiak get secret demand-backup-cloud-secrets -o 'jsonpath={.data.root}' logger.go:42: 10:45:11 | demand-backup-cloud/16-read-data | +++ base64 --decode logger.go:42: 10:45:11 | demand-backup-cloud/16-read-data | ++ local 'user=-uroot -p'\''Ehb-1ISwg9T[YQX0'\''' logger.go:42: 10:45:11 | demand-backup-cloud/16-read-data | ++ local pod= logger.go:42: 10:45:11 | demand-backup-cloud/16-read-data | +++ get_client_pod logger.go:42: 10:45:11 | demand-backup-cloud/16-read-data | +++ kubectl -n kuttl-test-optimal-kodiak get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 10:45:12 | demand-backup-cloud/16-read-data | ++ client_pod=mysql-client logger.go:42: 10:45:12 | demand-backup-cloud/16-read-data | ++ wait_pod mysql-client logger.go:42: 10:45:12 | demand-backup-cloud/16-read-data | ++ local pod=mysql-client logger.go:42: 10:45:12 | demand-backup-cloud/16-read-data | ++ local ns=kuttl-test-optimal-kodiak logger.go:42: 10:45:12 | demand-backup-cloud/16-read-data | ++ set +o xtrace logger.go:42: 10:45:12 | demand-backup-cloud/16-read-data | mysql-clienttrue logger.go:42: 10:45:12 | demand-backup-cloud/16-read-data | ++ kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-cloud-mysql-1.demand-backup-cloud-mysql -uroot -p'\''Ehb-1ISwg9T[YQX0'\''' logger.go:42: 10:45:12 | demand-backup-cloud/16-read-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 10:45:12 | demand-backup-cloud/16-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
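data=100500 is the payload written before the backups were taken, so its reappearance on pod 0 confirms that the azure restore from step 15 actually replayed the data. That restore was driven by the PerconaServerMySQLRestore object created at 10:41:02; a manifest reconstructed from the object and backup names in this log would look roughly like the following (field names per the operator's restore CRD; the test's actual YAML is not printed in the log):

    # Approximate manifest behind step 15's restore, reconstructed from the
    # resource and backup names in this log.
    kubectl -n "${NAMESPACE}" apply -f - <<'EOF'
    apiVersion: ps.percona.com/v1alpha1
    kind: PerconaServerMySQLRestore
    metadata:
      name: demand-backup-cloud-restore-azure
    spec:
      clusterName: demand-backup-cloud
      backupName: demand-backup-cloud-azure
    EOF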
logger.go:42: 10:45:13 | demand-backup-cloud/16-read-data | + data=100500 logger.go:42: 10:45:13 | demand-backup-cloud/16-read-data | + kubectl create configmap -n kuttl-test-optimal-kodiak 16-read-data-azure-1 --from-literal=data=100500 logger.go:42: 10:45:14 | demand-backup-cloud/16-read-data | configmap/16-read-data-azure-1 created logger.go:42: 10:45:14 | demand-backup-cloud/16-read-data | + for i in 0 1 2 logger.go:42: 10:45:14 | demand-backup-cloud/16-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-cloud-mysql-2.demand-backup-cloud-mysql' logger.go:42: 10:45:14 | demand-backup-cloud/16-read-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 10:45:14 | demand-backup-cloud/16-read-data | ++ local 'host=-h demand-backup-cloud-mysql-2.demand-backup-cloud-mysql' logger.go:42: 10:45:14 | demand-backup-cloud/16-read-data | +++ get_user_pass root logger.go:42: 10:45:14 | demand-backup-cloud/16-read-data | +++ local user=root logger.go:42: 10:45:14 | demand-backup-cloud/16-read-data | ++++ get_cluster_name logger.go:42: 10:45:14 | demand-backup-cloud/16-read-data | ++++ kubectl -n kuttl-test-optimal-kodiak get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 10:45:15 | demand-backup-cloud/16-read-data | +++ local secret=demand-backup-cloud-secrets logger.go:42: 10:45:15 | demand-backup-cloud/16-read-data | +++ kubectl -n kuttl-test-optimal-kodiak get secret demand-backup-cloud-secrets -o 'jsonpath={.data.root}' logger.go:42: 10:45:15 | demand-backup-cloud/16-read-data | +++ base64 --decode logger.go:42: 10:45:15 | demand-backup-cloud/16-read-data | ++ local 'user=-uroot -p'\''Ehb-1ISwg9T[YQX0'\''' logger.go:42: 10:45:15 | demand-backup-cloud/16-read-data | ++ local pod= logger.go:42: 10:45:15 | demand-backup-cloud/16-read-data | +++ get_client_pod logger.go:42: 10:45:15 | demand-backup-cloud/16-read-data | +++ kubectl -n kuttl-test-optimal-kodiak get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 10:45:16 | demand-backup-cloud/16-read-data | ++ client_pod=mysql-client logger.go:42: 10:45:16 | demand-backup-cloud/16-read-data | ++ wait_pod mysql-client logger.go:42: 10:45:16 | demand-backup-cloud/16-read-data | ++ local pod=mysql-client logger.go:42: 10:45:16 | demand-backup-cloud/16-read-data | ++ local ns=kuttl-test-optimal-kodiak logger.go:42: 10:45:16 | demand-backup-cloud/16-read-data | ++ set +o xtrace logger.go:42: 10:45:16 | demand-backup-cloud/16-read-data | mysql-clienttrue logger.go:42: 10:45:16 | demand-backup-cloud/16-read-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 10:45:16 | demand-backup-cloud/16-read-data | ++ kubectl -n kuttl-test-optimal-kodiak exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-cloud-mysql-2.demand-backup-cloud-mysql -uroot -p'\''Ehb-1ISwg9T[YQX0'\''' logger.go:42: 10:45:16 | demand-backup-cloud/16-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
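The 16-read-data-azure-N ConfigMaps are not consumed by the cluster at all; they exist purely so kuttl can diff them against the step's assert file. A hypothetical assert for this step, following kuttl's file-naming convention (the actual assert file is not reproduced in this log):

    # Hypothetical kuttl assert for step 16: the step only passes once a
    # ConfigMap with exactly this data exists in the test namespace.
    cat > 16-assert.yaml <<'EOF'
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: 16-read-data-azure-0
    data:
      data: "100500"
    EOF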
logger.go:42: 10:45:17 | demand-backup-cloud/16-read-data | + data=100500 logger.go:42: 10:45:17 | demand-backup-cloud/16-read-data | + kubectl create configmap -n kuttl-test-optimal-kodiak 16-read-data-azure-2 --from-literal=data=100500 logger.go:42: 10:45:18 | demand-backup-cloud/16-read-data | configmap/16-read-data-azure-2 created logger.go:42: 10:45:19 | demand-backup-cloud/16-read-data | test step completed 16-read-data logger.go:42: 10:45:19 | demand-backup-cloud/17-delete-all-backups | starting test step 17-delete-all-backups logger.go:42: 10:45:19 | demand-backup-cloud/17-delete-all-backups | running command: [sh -c set -o errexit set -o xtrace source ../../functions kubectl delete ps-backup --all -n "${NAMESPACE}"] logger.go:42: 10:45:19 | demand-backup-cloud/17-delete-all-backups | + source ../../functions
[... xtrace of sourcing ../../functions and vars.sh omitted — same variable exports, gdate/gsed detection, and kubectl version-skew warning as the earlier test steps ...]
logger.go:42: 10:45:20 | demand-backup-cloud/17-delete-all-backups | + kubectl delete ps-backup --all -n kuttl-test-optimal-kodiak logger.go:42: 10:45:21 | demand-backup-cloud/17-delete-all-backups | perconaservermysqlbackup.ps.percona.com "demand-backup-cloud-azure" deleted from kuttl-test-optimal-kodiak namespace logger.go:42: 10:45:21 | demand-backup-cloud/17-delete-all-backups | perconaservermysqlbackup.ps.percona.com "demand-backup-cloud-gcp" deleted from kuttl-test-optimal-kodiak namespace logger.go:42: 10:45:21 | demand-backup-cloud/17-delete-all-backups | perconaservermysqlbackup.ps.percona.com "demand-backup-cloud-s3" deleted from kuttl-test-optimal-kodiak namespace logger.go:42: 10:45:24 | demand-backup-cloud/17-delete-all-backups | test step completed 17-delete-all-backups logger.go:42: 10:45:24 | demand-backup-cloud/98-drop-finalizer | starting test step 98-drop-finalizer logger.go:42: 10:45:25 | demand-backup-cloud/98-drop-finalizer | PerconaServerMySQL:kuttl-test-optimal-kodiak/demand-backup-cloud updated logger.go:42: 10:45:25 | demand-backup-cloud/98-drop-finalizer | test step completed 98-drop-finalizer logger.go:42: 10:45:25 | 
demand-backup-cloud/99-remove-cluster-gracefully | starting test step 99-remove-cluster-gracefully logger.go:42: 10:45:25 | demand-backup-cloud/99-remove-cluster-gracefully | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_operator] logger.go:42: 10:45:25 | demand-backup-cloud/99-remove-cluster-gracefully | + source ../../functions
[... xtrace of sourcing ../../functions and vars.sh omitted — same variable exports, gdate/gsed detection, and kubectl version-skew warning as the earlier test steps ...]
logger.go:42: 10:45:26 | demand-backup-cloud/99-remove-cluster-gracefully | + destroy_operator logger.go:42: 10:45:26 | demand-backup-cloud/99-remove-cluster-gracefully | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0 logger.go:42: 10:45:26 | demand-backup-cloud/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 10:45:27 | demand-backup-cloud/99-remove-cluster-gracefully | deployment.apps "percona-server-mysql-operator" force deleted from ps-operator namespace logger.go:42: 10:45:27 | demand-backup-cloud/99-remove-cluster-gracefully | + [[ -n ps-operator ]] logger.go:42: 10:45:27 | demand-backup-cloud/99-remove-cluster-gracefully | + kubectl delete namespace ps-operator --force --grace-period=0 logger.go:42: 10:45:27 | demand-backup-cloud/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
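Teardown is deliberately brute-force: step 17 removed the ps-backup objects first, while the operator was still running to process their deletion, and only then does destroy_operator force-delete the deployment and its namespace — hence the duplicated "Immediate deletion" warning. From the trace, destroy_operator reduces to:

    # destroy_operator as traced above; the namespace is assumed to come
    # from an OPERATOR_NS-style variable, set to ps-operator in this run.
    destroy_operator() {
        local ns="${OPERATOR_NS:-ps-operator}"
        kubectl -n "${ns}" delete deployment percona-server-mysql-operator \
            --force --grace-period=0
        if [[ -n "${ns}" ]]; then
            kubectl delete namespace "${ns}" --force --grace-period=0
        fi
    }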
logger.go:42: 10:45:28 | demand-backup-cloud/99-remove-cluster-gracefully | namespace "ps-operator" force deleted logger.go:42: 10:45:39 | demand-backup-cloud/99-remove-cluster-gracefully | test step completed 99-remove-cluster-gracefully logger.go:42: 10:45:40 | demand-backup-cloud | demand-backup-cloud events from ns kuttl-test-optimal-kodiak: logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:07 +0000 UTC Normal Pod mysql-client Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/mysql-client to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-68dn default-scheduler logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:08 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:27 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-cloud-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:27 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-cloud-mysql-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:27 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-cloud-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-optimal-kodiak/datadir-demand-backup-cloud-mysql-0" pd.csi.storage.gke.io_gke-af07c50bf7654d99bec4-0109-bb91-vm_dfe8677f-ddfc-4c6e-abdf-d935afddf32d logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:27 +0000 UTC Normal StatefulSet.apps demand-backup-cloud-mysql SuccessfulCreate create Claim datadir-demand-backup-cloud-mysql-0 Pod demand-backup-cloud-mysql-0 in StatefulSet demand-backup-cloud-mysql success statefulset-controller logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:27 +0000 UTC Normal PodDisruptionBudget.policy demand-backup-cloud-mysql NoPods No matching pods found controllermanager logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:27 +0000 UTC Normal StatefulSet.apps demand-backup-cloud-mysql SuccessfulCreate create Pod demand-backup-cloud-mysql-0 in StatefulSet demand-backup-cloud-mysql successful statefulset-controller logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:27 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 18.553s (18.554s including waiting). Image size: 451034837 bytes. 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:27 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container: mysql-client kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:27 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:28 +0000 UTC Normal Pod demand-backup-cloud-orc-0 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-orc-0 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-68dn default-scheduler logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:28 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:28 +0000 UTC Normal StatefulSet.apps demand-backup-cloud-orc SuccessfulCreate create Pod demand-backup-cloud-orc-0 in StatefulSet demand-backup-cloud-orc successful statefulset-controller logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:28 +0000 UTC Normal PodDisruptionBudget.policy demand-backup-cloud-orchestrator NoPods No matching pods found controllermanager logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:28 +0000 UTC Normal PerconaServerMySQL.ps.percona.com demand-backup-cloud ClusterStateChanged -> Initializing ps-controller logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:29 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 416ms (416ms including waiting). Image size: 110731832 bytes. 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:29 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:29 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:31 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-cloud-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-e8c364a6-add0-4782-be93-0015ade31362 pd.csi.storage.gke.io_gke-af07c50bf7654d99bec4-0109-bb91-vm_dfe8677f-ddfc-4c6e-abdf-d935afddf32d logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:31 +0000 UTC Normal Pod demand-backup-cloud-mysql-0 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-mysql-0 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-5h0s default-scheduler logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:36 +0000 UTC Normal Pod demand-backup-cloud-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-e8c364a6-add0-4782-be93-0015ade31362" attachdetach-controller logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:36 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:37 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 1.491s (1.491s including waiting). Image size: 73409902 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:37 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:37 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:37 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:38 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 189ms (189ms including waiting). Image size: 73409902 bytes. 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:38 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:38 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:39 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:42 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 2.956s (2.956s including waiting). Image size: 110731832 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:42 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:42 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:24:45 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:00 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 14.829s (14.829s including waiting). Image size: 451034837 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:00 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:00 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:00 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:09 +0000 UTC Normal Pod demand-backup-cloud-orc-1 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-orc-1 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-8q2t default-scheduler logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:09 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:09 +0000 UTC Normal StatefulSet.apps demand-backup-cloud-orc SuccessfulCreate create Pod demand-backup-cloud-orc-1 in StatefulSet demand-backup-cloud-orc successful statefulset-controller logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:13 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 3.258s (3.258s including waiting). Image size: 110731832 bytes. 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:13 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:13 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:16 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:18 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 1.5s (1.5s including waiting). Image size: 73409902 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:18 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:18 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:18 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:18 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 192ms (192ms including waiting). Image size: 73409902 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:18 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:18 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:24 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 23.312s (23.312s including waiting). Image size: 443507032 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:24 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:24 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:24 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:29 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 5.136s (5.136s including waiting). Image size: 138708599 bytes. 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:29 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:29 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:40 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-cloud-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:40 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-cloud-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:40 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-cloud-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-optimal-kodiak/datadir-demand-backup-cloud-mysql-1" pd.csi.storage.gke.io_gke-af07c50bf7654d99bec4-0109-bb91-vm_dfe8677f-ddfc-4c6e-abdf-d935afddf32d logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:40 +0000 UTC Normal StatefulSet.apps demand-backup-cloud-mysql SuccessfulCreate create Claim datadir-demand-backup-cloud-mysql-1 Pod demand-backup-cloud-mysql-1 in StatefulSet demand-backup-cloud-mysql success statefulset-controller logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:40 +0000 UTC Normal StatefulSet.apps demand-backup-cloud-mysql SuccessfulCreate create Pod demand-backup-cloud-mysql-1 in StatefulSet demand-backup-cloud-mysql successful statefulset-controller logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:41 +0000 UTC Normal PodDisruptionBudget.policy demand-backup-cloud-haproxy NoPods No matching pods found controllermanager logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:42 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-haproxy-0 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-68dn default-scheduler logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:42 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:42 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 195ms (195ms including waiting). Image size: 110731832 bytes. 
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:42 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:42 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:42 +0000 UTC Normal StatefulSet.apps demand-backup-cloud-haproxy SuccessfulCreate create Pod demand-backup-cloud-haproxy-0 in StatefulSet demand-backup-cloud-haproxy successful statefulset-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:43 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-cloud-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-d6fa33a7-6980-42d1-9a1c-a986d9e5d5ac pd.csi.storage.gke.io_gke-af07c50bf7654d99bec4-0109-bb91-vm_dfe8677f-ddfc-4c6e-abdf-d935afddf32d
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:44 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:44 +0000 UTC Normal Pod demand-backup-cloud-mysql-1 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-mysql-1 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-8q2t default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:46 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 2.271s (2.271s including waiting). Image size: 103577541 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:46 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:46 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:46 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:47 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 190ms (190ms including waiting). Image size: 103577541 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:47 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:47 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:48 +0000 UTC Normal Pod demand-backup-cloud-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-d6fa33a7-6980-42d1-9a1c-a986d9e5d5ac" attachdetach-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:49 +0000 UTC Normal Pod demand-backup-cloud-orc-2 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-orc-2 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-5h0s default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:49 +0000 UTC Normal StatefulSet.apps demand-backup-cloud-orc SuccessfulCreate create Pod demand-backup-cloud-orc-2 in StatefulSet demand-backup-cloud-orc successful statefulset-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:50 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:50 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 183ms (183ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:50 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:50 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:52 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:52 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 222ms (222ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:52 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:52 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:53 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:55 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:57 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 1.501s (1.501s including waiting). Image size: 73409902 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:57 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:57 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:57 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:57 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 187ms (187ms including waiting). Image size: 73409902 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:57 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:25:57 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:26:03 +0000 UTC Warning Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:26:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 15.303s (15.303s including waiting). Image size: 451034837 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:26:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:26:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:26:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:26:34 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 25.255s (25.255s including waiting). Image size: 443507032 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:26:34 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:26:34 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:26:34 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:26:40 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 5.537s (5.537s including waiting). Image size: 138708599 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:26:40 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:26:40 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:26:42 +0000 UTC Warning Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2026/03/17 10:26:42 Waiting for MySQL ready state 2026/03/17 10:26:42 MySQL is ready 2026/03/17 10:26:42 Peers: [3933326539613433.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak 6134316264363239.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak] 2026/03/17 10:26:42 FQDN: demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:26:42 Primary: demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak Replicas: [demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak] 2026/03/17 10:26:42 lookup demand-backup-cloud-mysql-1 [10.53.234.6] 2026/03/17 10:26:42 PodIP: 10.53.234.6 2026/03/17 10:26:42 lookup demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak [10.53.233.7] 2026/03/17 10:26:42 PrimaryIP: 10.53.233.7 2026/03/17 10:26:42 Donor: demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:26:42 Opening connection to 10.53.234.6 2026/03/17 10:26:42 Clone required: true 2026/03/17 10:26:42 Checking if a clone in progress 2026/03/17 10:26:42 Clone in progress: false 2026/03/17 10:26:42 Cloning from demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:26:42 Clone finished. Restarting container... kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:26:43 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:26:50 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 186ms (186ms including waiting). Image size: 451034837 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:12 +0000 UTC Warning Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Unhealthy Liveness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:22 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-cloud-mysql-2 WaitForPodScheduled waiting for pod demand-backup-cloud-mysql-2 to be scheduled persistentvolume-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:22 +0000 UTC Warning Pod demand-backup-cloud-mysql-2 Scheduling FailedScheduling 0/3 nodes are available: persistentvolumeclaim "datadir-demand-backup-cloud-mysql-2" not found. preemption: 0/3 nodes are available: 3 Preemption is not helpful for scheduling. default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:22 +0000 UTC Normal StatefulSet.apps demand-backup-cloud-mysql SuccessfulCreate create Claim datadir-demand-backup-cloud-mysql-2 Pod demand-backup-cloud-mysql-2 in StatefulSet demand-backup-cloud-mysql success statefulset-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:22 +0000 UTC Normal StatefulSet.apps demand-backup-cloud-mysql SuccessfulCreate create Pod demand-backup-cloud-mysql-2 in StatefulSet demand-backup-cloud-mysql successful statefulset-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:24 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-cloud-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:25 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-cloud-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-optimal-kodiak/datadir-demand-backup-cloud-mysql-2" pd.csi.storage.gke.io_gke-af07c50bf7654d99bec4-0109-bb91-vm_dfe8677f-ddfc-4c6e-abdf-d935afddf32d
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:28 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-cloud-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-c680e1db-c664-4c9f-9511-7294e66e4570 pd.csi.storage.gke.io_gke-af07c50bf7654d99bec4-0109-bb91-vm_dfe8677f-ddfc-4c6e-abdf-d935afddf32d
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:29 +0000 UTC Normal Pod demand-backup-cloud-mysql-2 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-mysql-2 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-68dn default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:34 +0000 UTC Normal Pod demand-backup-cloud-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-c680e1db-c664-4c9f-9511-7294e66e4570" attachdetach-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:35 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:35 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 144ms (144ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:35 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:35 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:37 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:37 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 190ms (190ms including waiting). Image size: 451034837 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:37 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:37 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:37 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:41 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-haproxy-1 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-8q2t default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:41 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:41 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 203ms (203ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:41 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:41 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:41 +0000 UTC Normal StatefulSet.apps demand-backup-cloud-haproxy SuccessfulCreate create Pod demand-backup-cloud-haproxy-1 in StatefulSet demand-backup-cloud-haproxy successful statefulset-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:43 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:46 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 2.46s (2.461s including waiting). Image size: 103577541 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:46 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:46 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:46 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:46 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 181ms (181ms including waiting). Image size: 103577541 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:46 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:46 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:52 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 14.207s (14.207s including waiting). Image size: 443507032 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:52 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:52 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:52 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:57 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 5.251s (5.251s including waiting). Image size: 138708599 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:57 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:27:57 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:28:03 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-haproxy-2 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-5h0s default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:28:03 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:28:03 +0000 UTC Normal StatefulSet.apps demand-backup-cloud-haproxy SuccessfulCreate create Pod demand-backup-cloud-haproxy-2 in StatefulSet demand-backup-cloud-haproxy successful statefulset-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:28:04 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 233ms (233ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:28:04 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:28:04 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:28:05 +0000 UTC Warning Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2026/03/17 10:28:05 Waiting for MySQL ready state 2026/03/17 10:28:05 MySQL is ready 2026/03/17 10:28:05 Peers: [3731386263663933.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak 3933326539613433.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak 6134316264363239.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak] 2026/03/17 10:28:05 FQDN: demand-backup-cloud-mysql-2.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:28:05 Primary: demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak Replicas: [demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak demand-backup-cloud-mysql-2.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak] 2026/03/17 10:28:05 lookup demand-backup-cloud-mysql-2 [10.53.232.8] 2026/03/17 10:28:05 PodIP: 10.53.232.8 2026/03/17 10:28:05 lookup demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak [10.53.233.7] 2026/03/17 10:28:05 PrimaryIP: 10.53.233.7 2026/03/17 10:28:05 Donor: demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:28:05 Opening connection to 10.53.232.8 2026/03/17 10:28:05 Clone required: true 2026/03/17 10:28:05 Checking if a clone in progress 2026/03/17 10:28:05 Clone in progress: false 2026/03/17 10:28:05 Cloning from demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:28:05 Clone finished. Restarting container... kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:28:06 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:28:06 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:28:08 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 2.282s (2.282s including waiting). Image size: 103577541 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:28:08 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:28:08 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:28:08 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:28:08 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 184ms (184ms including waiting). Image size: 103577541 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:28:08 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:28:08 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:28:12 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 174ms (174ms including waiting). Image size: 451034837 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:28:50 +0000 UTC Normal PerconaServerMySQL.ps.percona.com demand-backup-cloud ClusterStateChanged Initializing -> Ready ps-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:02 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:02 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:02 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:03 +0000 UTC Normal PerconaServerMySQL.ps.percona.com demand-backup-cloud ClusterStateChanged Ready -> Initializing ps-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:05 +0000 UTC Warning Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/03/17 10:29:05 MySQL state is not ready... kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:10 +0000 UTC Warning Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/03/17 10:29:10 MySQL state is not ready... kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:14 +0000 UTC Warning Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Unhealthy Readiness probe errored: rpc error: code = Unknown desc = failed to exec in container: container is in CONTAINER_EXITED state kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:22 +0000 UTC Normal Pod demand-backup-cloud-mysql-0 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-mysql-0 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-5h0s default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:23 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:24 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 194ms (194ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:24 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:24 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 190ms (190ms including waiting). Image size: 451034837 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 192ms (192ms including waiting). Image size: 443507032 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:27 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 201ms (201ms including waiting). Image size: 138708599 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:27 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:27 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:44 +0000 UTC Warning Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Unhealthy Startup probe failed: 2026/03/17 10:29:43 Waiting for MySQL ready state 2026/03/17 10:29:43 MySQL is ready 2026/03/17 10:29:43 Peers: [3731386263663933.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak 3930343736623037.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak 3933326539613433.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak] 2026/03/17 10:29:43 FQDN: demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:29:43 Primary: demand-backup-cloud-mysql-2.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak Replicas: [demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak] 2026/03/17 10:29:43 lookup demand-backup-cloud-mysql-0 [10.53.233.10] 2026/03/17 10:29:43 PodIP: 10.53.233.10 2026/03/17 10:29:43 lookup demand-backup-cloud-mysql-2.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak [10.53.232.8] 2026/03/17 10:29:43 PrimaryIP: 10.53.232.8 2026/03/17 10:29:44 Donor: demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:29:44 Opening connection to 10.53.233.10 2026/03/17 10:29:44 Clone required: true 2026/03/17 10:29:44 Checking if a clone in progress 2026/03/17 10:29:44 Clone in progress: false 2026/03/17 10:29:44 Cloning from demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:29:44 Clone finished. Restarting container... kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:44 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:29:48 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 177ms (177ms including waiting). Image size: 451034837 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:30:37 +0000 UTC Normal Pod xb-demand-backup-cloud-s3-aws-s3-t4fx4 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/xb-demand-backup-cloud-s3-aws-s3-t4fx4 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-68dn default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:30:37 +0000 UTC Normal Job.batch xb-demand-backup-cloud-s3-aws-s3 SuccessfulCreate Created pod: xb-demand-backup-cloud-s3-aws-s3-t4fx4 job-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:30:38 +0000 UTC Normal Pod xb-demand-backup-cloud-s3-aws-s3-t4fx4.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:30:38 +0000 UTC Normal Pod xb-demand-backup-cloud-s3-aws-s3-t4fx4.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 229ms (229ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:30:38 +0000 UTC Normal Pod xb-demand-backup-cloud-s3-aws-s3-t4fx4.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:30:38 +0000 UTC Normal Pod xb-demand-backup-cloud-s3-aws-s3-t4fx4.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:30:39 +0000 UTC Normal Pod xb-demand-backup-cloud-s3-aws-s3-t4fx4.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:30:40 +0000 UTC Normal Pod xb-demand-backup-cloud-s3-aws-s3-t4fx4.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 182ms (182ms including waiting). Image size: 443507032 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:30:40 +0000 UTC Normal Pod xb-demand-backup-cloud-s3-aws-s3-t4fx4.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:30:40 +0000 UTC Normal Pod xb-demand-backup-cloud-s3-aws-s3-t4fx4.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:30:46 +0000 UTC Normal Job.batch xb-demand-backup-cloud-s3-aws-s3 Completed Job completed job-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:08 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:08 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:08 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:08 +0000 UTC Normal StatefulSet.apps demand-backup-cloud-mysql SuccessfulDelete delete Pod demand-backup-cloud-mysql-2 in StatefulSet demand-backup-cloud-mysql successful statefulset-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:08 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:08 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:08 +0000 UTC Normal StatefulSet.apps demand-backup-cloud-orc SuccessfulDelete delete Pod demand-backup-cloud-orc-2 in StatefulSet demand-backup-cloud-orc successful statefulset-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:09 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:09 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:09 +0000 UTC Normal StatefulSet.apps demand-backup-cloud-haproxy SuccessfulDelete delete Pod demand-backup-cloud-haproxy-2 in StatefulSet demand-backup-cloud-haproxy successful statefulset-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:09 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:09 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:09 +0000 UTC Warning Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Unhealthy Readiness probe failed: Get "http://10.53.233.8:3000/api/health": dial tcp 10.53.233.8:3000: connect: connection refused kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:09 +0000 UTC Normal StatefulSet.apps demand-backup-cloud-orc SuccessfulDelete delete Pod demand-backup-cloud-orc-1 in StatefulSet demand-backup-cloud-orc successful statefulset-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:09 +0000 UTC Normal PerconaServerMySQL.ps.percona.com demand-backup-cloud ClusterStateChanged Ready -> Stopping ps-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:10 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:10 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:10 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:10 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:10 +0000 UTC Normal StatefulSet.apps demand-backup-cloud-haproxy SuccessfulDelete delete Pod demand-backup-cloud-haproxy-1 in StatefulSet demand-backup-cloud-haproxy successful statefulset-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:10 +0000 UTC Normal StatefulSet.apps demand-backup-cloud-haproxy SuccessfulDelete delete Pod demand-backup-cloud-haproxy-0 in StatefulSet demand-backup-cloud-haproxy successful statefulset-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:10 +0000 UTC Warning Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/03/17 10:31:10 MySQL state is not ready... kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:10 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:10 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:10 +0000 UTC Normal StatefulSet.apps demand-backup-cloud-orc SuccessfulDelete delete Pod demand-backup-cloud-orc-0 in StatefulSet demand-backup-cloud-orc successful statefulset-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:15 +0000 UTC Warning Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/03/17 10:31:15 MySQL state is not ready... kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:20 +0000 UTC Warning Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Unhealthy Readiness probe errored: rpc error: code = Unknown desc = failed to exec in container: container is in CONTAINER_EXITED state kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:29 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:29 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:29 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:29 +0000 UTC Normal StatefulSet.apps demand-backup-cloud-mysql SuccessfulDelete delete Pod demand-backup-cloud-mysql-1 in StatefulSet demand-backup-cloud-mysql successful statefulset-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:32 +0000 UTC Warning Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Unhealthy Readiness probe errored: rpc error: code = Unknown desc = failed to exec in container: container is in CONTAINER_EXITED state kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:33 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:33 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:33 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:33 +0000 UTC Warning Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/03/17 10:31:33 MySQL state is not ready... kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:33 +0000 UTC Normal StatefulSet.apps demand-backup-cloud-mysql SuccessfulDelete delete Pod demand-backup-cloud-mysql-0 in StatefulSet demand-backup-cloud-mysql successful statefulset-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:35 +0000 UTC Normal PerconaServerMySQL.ps.percona.com demand-backup-cloud ClusterStateChanged Stopping -> Paused ps-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:39 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-s3-4bxmm Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/xb-restore-demand-backup-cloud-restore-s3-4bxmm to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-68dn default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:39 +0000 UTC Warning Pod xb-restore-demand-backup-cloud-restore-s3-4bxmm FailedAttachVolume Multi-Attach error for volume "pvc-e8c364a6-add0-4782-be93-0015ade31362" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:39 +0000 UTC Normal Job.batch xb-restore-demand-backup-cloud-restore-s3 SuccessfulCreate Created pod: xb-restore-demand-backup-cloud-restore-s3-4bxmm job-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:31:59 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-s3-4bxmm SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-e8c364a6-add0-4782-be93-0015ade31362" attachdetach-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:00 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-s3-4bxmm.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:00 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-s3-4bxmm.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 183ms (183ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:00 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-s3-4bxmm.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:00 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-s3-4bxmm.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:03 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-s3-4bxmm.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:03 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-s3-4bxmm.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 204ms (204ms including waiting). Image size: 443507032 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:03 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-s3-4bxmm.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:03 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-s3-4bxmm.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:12 +0000 UTC Normal Pod demand-backup-cloud-mysql-0 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-mysql-0 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-8q2t default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:12 +0000 UTC Warning Pod demand-backup-cloud-mysql-0 FailedAttachVolume Multi-Attach error for volume "pvc-e8c364a6-add0-4782-be93-0015ade31362" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:12 +0000 UTC Normal Pod demand-backup-cloud-orc-0 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-orc-0 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-68dn default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:12 +0000 UTC Normal PerconaServerMySQL.ps.percona.com demand-backup-cloud ClusterStateChanged Paused -> Initializing ps-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:12 +0000 UTC Normal Job.batch xb-restore-demand-backup-cloud-restore-s3 Completed Job completed job-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:13 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:13 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 202ms (202ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:13 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:13 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:15 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:15 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 182ms (182ms including waiting). Image size: 73409902 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:15 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:15 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:15 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:15 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 155ms (155ms including waiting). Image size: 73409902 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:15 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:15 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:33 +0000 UTC Normal Pod demand-backup-cloud-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-e8c364a6-add0-4782-be93-0015ade31362" attachdetach-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:34 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:34 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 239ms (239ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:34 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:34 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:36 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:36 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 152ms (152ms including waiting). Image size: 451034837 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:36 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:36 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:36 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:36 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 210ms (210ms including waiting). Image size: 443507032 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:36 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:36 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:36 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:37 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 182ms (182ms including waiting). Image size: 138708599 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:37 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:37 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:47 +0000 UTC Normal Pod demand-backup-cloud-orc-1 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-orc-1 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-5h0s default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:47 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:47 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 172ms (172ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:48 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:48 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:49 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:49 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 196ms (196ms including waiting). Image size: 73409902 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:49 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:49 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:49 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:50 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 204ms (204ms including waiting). Image size: 73409902 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:50 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:32:50 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-1 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-mysql-1 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-5h0s default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:14 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-haproxy-0 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-68dn default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:14 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:14 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 171ms (171ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:14 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:14 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:16 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:16 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 162ms (162ms including waiting). Image size: 103577541 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:16 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:16 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:16 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:16 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 181ms (181ms including waiting). Image size: 103577541 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:16 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:16 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:17 +0000 UTC Normal Pod demand-backup-cloud-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-d6fa33a7-6980-42d1-9a1c-a986d9e5d5ac" attachdetach-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:20 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:20 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 191ms (191ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:20 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:20 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:21 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:21 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 170ms (170ms including waiting). Image size: 451034837 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:21 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:21 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:21 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:21 +0000 UTC Normal Pod demand-backup-cloud-orc-2 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-orc-2 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-8q2t default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:22 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 183ms (183ms including waiting). Image size: 443507032 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:22 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:22 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:22 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:22 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 181ms (181ms including waiting). Image size: 138708599 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:22 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:22 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:22 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:22 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 197ms (197ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:22 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:22 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:24 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:24 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 173ms (174ms including waiting). Image size: 73409902 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:24 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:24 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:24 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:24 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 184ms (184ms including waiting). Image size: 73409902 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:24 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:24 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:33 +0000 UTC Warning Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:41 +0000 UTC Warning Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2026/03/17 10:33:40 Waiting for MySQL ready state 2026/03/17 10:33:40 MySQL is ready 2026/03/17 10:33:40 Peers: [3435306465383835.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak 6337613966353635.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak] 2026/03/17 10:33:40 FQDN: demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:33:40 Primary: demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak Replicas: [demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak] 2026/03/17 10:33:40 lookup demand-backup-cloud-mysql-1 [10.53.233.12] 2026/03/17 10:33:40 PodIP: 10.53.233.12 2026/03/17 10:33:40 lookup demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak [10.53.234.8] 2026/03/17 10:33:40 PrimaryIP: 10.53.234.8 2026/03/17 10:33:40 Donor: demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:33:40 Opening connection to 10.53.233.12 2026/03/17 10:33:40 Clone required: true 2026/03/17 10:33:40 Checking if a clone in progress 2026/03/17 10:33:40 Clone in progress: false 2026/03/17 10:33:40 Cloning from demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:33:41 Clone finished. Restarting container... kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:41 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:33:44 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 211ms (211ms including waiting). Image size: 451034837 bytes. kubelet
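NOTE: The Unhealthy/Killing pair just above is the replica bootstrap working as designed, not a failure: the startup probe reports "Clone required: true", the pod clones its dataset from the donor (demand-backup-cloud-mysql-0), and the kubelet then restarts the mysql container so it comes up on the cloned data. To pull the same event stream for a single pod straight from the cluster, using the namespace from this run:

    kubectl -n kuttl-test-optimal-kodiak get events \
      --field-selector involvedObject.name=demand-backup-cloud-mysql-1 \
      --sort-by=.lastTimestamp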
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:15 +0000 UTC Normal Pod demand-backup-cloud-mysql-2 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-mysql-2 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-68dn default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:23 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-haproxy-1 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-8q2t default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:23 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:23 +0000 UTC Normal Pod demand-backup-cloud-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-c680e1db-c664-4c9f-9511-7294e66e4570" attachdetach-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:24 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 226ms (226ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:24 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:24 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:24 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:24 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 234ms (234ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:24 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:24 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:25 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:25 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 176ms (176ms including waiting). Image size: 103577541 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:25 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:25 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:25 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:26 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 216ms (216ms including waiting). Image size: 103577541 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:26 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:26 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 194ms (195ms including waiting). Image size: 451034837 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 151ms (151ms including waiting). Image size: 443507032 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 134ms (134ms including waiting). Image size: 138708599 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:42 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-haproxy-2 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-5h0s default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:43 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:43 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 215ms (215ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:43 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:43 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:44 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:45 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 235ms (235ms including waiting). Image size: 103577541 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:45 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:45 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:45 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:45 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 216ms (216ms including waiting). Image size: 103577541 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:45 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:45 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:45 +0000 UTC Warning Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2026/03/17 10:34:44 Waiting for MySQL ready state 2026/03/17 10:34:44 MySQL is ready 2026/03/17 10:34:44 Peers: [3435306465383835.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak 3661333037323733.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak 6337613966353635.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak] 2026/03/17 10:34:44 FQDN: demand-backup-cloud-mysql-2.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:34:44 Primary: demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak Replicas: [demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak demand-backup-cloud-mysql-2.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak] 2026/03/17 10:34:44 lookup demand-backup-cloud-mysql-2 [10.53.232.13] 2026/03/17 10:34:44 PodIP: 10.53.232.13 2026/03/17 10:34:44 lookup demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak [10.53.234.8] 2026/03/17 10:34:44 PrimaryIP: 10.53.234.8 2026/03/17 10:34:44 Donor: demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:34:44 Opening connection to 10.53.232.13 2026/03/17 10:34:44 Clone required: true 2026/03/17 10:34:44 Checking if a clone in progress 2026/03/17 10:34:44 Clone in progress: false 2026/03/17 10:34:44 Cloning from demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:34:45 Clone finished. Restarting container... kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:45 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:34:48 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 167ms (167ms including waiting). Image size: 451034837 bytes. kubelet
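NOTE: Note the donor in the probe output above: mysql-2 clones from mysql-1 rather than from the primary, so a single donor is not saturated when several replicas bootstrap at once. The next block of events comes from the on-demand backup job xb-demand-backup-cloud-gcp-gcp-cs. In this operator an on-demand backup is requested declaratively; a minimal sketch of the resource that would produce a job with that name, with the metadata name inferred from the job name above and the spec fields assumed from the operator's deploy/backup examples (the exact schema can differ between operator versions):

    apiVersion: ps.percona.com/v1alpha1
    kind: PerconaServerMySQLBackup
    metadata:
      name: demand-backup-cloud-gcp
    spec:
      clusterName: demand-backup-cloud
      storageName: gcp-cs   # must match a storages entry in the cluster CR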
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:35:38 +0000 UTC Normal Pod xb-demand-backup-cloud-gcp-gcp-cs-6jd79 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/xb-demand-backup-cloud-gcp-gcp-cs-6jd79 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-68dn default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:35:38 +0000 UTC Normal Job.batch xb-demand-backup-cloud-gcp-gcp-cs SuccessfulCreate Created pod: xb-demand-backup-cloud-gcp-gcp-cs-6jd79 job-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:35:39 +0000 UTC Normal Pod xb-demand-backup-cloud-gcp-gcp-cs-6jd79.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:35:39 +0000 UTC Normal Pod xb-demand-backup-cloud-gcp-gcp-cs-6jd79.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 153ms (153ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:35:39 +0000 UTC Normal Pod xb-demand-backup-cloud-gcp-gcp-cs-6jd79.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:35:39 +0000 UTC Normal Pod xb-demand-backup-cloud-gcp-gcp-cs-6jd79.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:35:41 +0000 UTC Normal Pod xb-demand-backup-cloud-gcp-gcp-cs-6jd79.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:35:41 +0000 UTC Normal Pod xb-demand-backup-cloud-gcp-gcp-cs-6jd79.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 188ms (188ms including waiting). Image size: 443507032 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:35:41 +0000 UTC Normal Pod xb-demand-backup-cloud-gcp-gcp-cs-6jd79.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:35:41 +0000 UTC Normal Pod xb-demand-backup-cloud-gcp-gcp-cs-6jd79.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:35:48 +0000 UTC Normal Job.batch xb-demand-backup-cloud-gcp-gcp-cs Completed Job completed job-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:09 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:10 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:10 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:10 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:11 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:11 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:11 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:11 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:11 +0000 UTC Warning Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Unhealthy Readiness probe failed: Get "http://10.53.234.9:3000/api/health": dial tcp 10.53.234.9:3000: connect: connection refused kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:13 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:13 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:13 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
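NOTE: The run of Killing events above is the operator scaling the whole cluster down before restoring: the restore must run against stopped mysql pods so the job can safely overwrite the data volume. The restore itself is also declarative; a minimal sketch matching the job name that appears below, with the resource name inferred from that job name and the spec fields assumed from the operator's deploy/restore examples:

    apiVersion: ps.percona.com/v1alpha1
    kind: PerconaServerMySQLRestore
    metadata:
      name: demand-backup-cloud-restore-gcp
    spec:
      clusterName: demand-backup-cloud
      backupName: demand-backup-cloud-gcp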
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:15 +0000 UTC Warning Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/03/17 10:36:15 MySQL state is not ready... kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:15 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:15 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:17 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:17 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:17 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:17 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:17 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:19 +0000 UTC Warning Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/03/17 10:36:19 MySQL state is not ready... kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:22 +0000 UTC Warning Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Unhealthy Readiness probe failed: Get "http://10.53.232.11:3000/api/health": dial tcp 10.53.232.11:3000: connect: connection refused kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:24 +0000 UTC Warning Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Unhealthy Readiness probe errored: rpc error: code = Unknown desc = failed to exec in container: container is in CONTAINER_EXITED state kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:51 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-gcp-4jr6w Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/xb-restore-demand-backup-cloud-restore-gcp-4jr6w to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-68dn default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:51 +0000 UTC Normal Job.batch xb-restore-demand-backup-cloud-restore-gcp SuccessfulCreate Created pod: xb-restore-demand-backup-cloud-restore-gcp-4jr6w job-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:55 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-gcp-4jr6w SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-e8c364a6-add0-4782-be93-0015ade31362" attachdetach-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:56 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-gcp-4jr6w.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:57 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-gcp-4jr6w.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 222ms (222ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:57 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-gcp-4jr6w.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:57 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-gcp-4jr6w.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:58 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-gcp-4jr6w.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:58 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-gcp-4jr6w.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 153ms (153ms including waiting). Image size: 443507032 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:58 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-gcp-4jr6w.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:36:58 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-gcp-4jr6w.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:06 +0000 UTC Normal Pod demand-backup-cloud-mysql-0 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-mysql-0 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-8q2t default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:06 +0000 UTC Normal Job.batch xb-restore-demand-backup-cloud-restore-gcp Completed Job completed job-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:07 +0000 UTC Warning Pod demand-backup-cloud-mysql-0 FailedAttachVolume Multi-Attach error for volume "pvc-e8c364a6-add0-4782-be93-0015ade31362" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:07 +0000 UTC Normal Pod demand-backup-cloud-orc-0 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-orc-0 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-68dn default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:07 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:07 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 220ms (220ms including waiting). Image size: 110731832 bytes. kubelet
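NOTE: The FailedAttachVolume warning above is transient and expected in this flow: the restore job had the mysql-0 volume "pvc-e8c364a6-add0-4782-be93-0015ade31362" attached on node ...-68dn, and the new mysql-0 pod was scheduled to ...-8q2t before that attachment was torn down. Once the attachdetach-controller detaches the volume from the old node, the SuccessfulAttachVolume event follows (about 20 seconds later in this run). If it did not clear on its own, a reasonable first check would be:

    kubectl get volumeattachments | grep pvc-e8c364a6-add0-4782-be93-0015ade31362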
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:07 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:07 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:09 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:09 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 186ms (186ms including waiting). Image size: 73409902 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:09 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:09 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:09 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:09 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 161ms (161ms including waiting). Image size: 73409902 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:09 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:10 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-e8c364a6-add0-4782-be93-0015ade31362" attachdetach-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:27 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:27 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 208ms (208ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:27 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:27 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:29 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:30 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 210ms (210ms including waiting). Image size: 451034837 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:30 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:30 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:30 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:30 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 185ms (185ms including waiting). Image size: 443507032 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:30 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:30 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:30 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:30 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 175ms (175ms including waiting). Image size: 138708599 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:30 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:30 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:41 +0000 UTC Normal Pod demand-backup-cloud-orc-1 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-orc-1 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-5h0s default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:42 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:42 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 214ms (214ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:42 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:42 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:44 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:44 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 207ms (207ms including waiting). Image size: 73409902 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:44 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:44 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:44 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:44 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 200ms (200ms including waiting). Image size: 73409902 bytes. kubelet
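NOTE: From here the operator brings the cluster back in the same order as the initial deploy (orchestrator, then mysql, then haproxy). When reproducing this flow by hand, one way to block until the restored mysql pods are all back, assuming the statefulset is named after the pod prefix as in this run, is:

    kubectl -n kuttl-test-optimal-kodiak rollout status statefulset/demand-backup-cloud-mysql --timeout=10m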
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:44 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:37:44 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:02 +0000 UTC Normal Pod demand-backup-cloud-mysql-1 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-mysql-1 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-5h0s default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:05 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-haproxy-0 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-68dn default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:05 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:05 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 174ms (174ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:05 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:06 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:07 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:07 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 179ms (179ms including waiting). Image size: 103577541 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:07 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:07 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:07 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:08 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 179ms (179ms including waiting). Image size: 103577541 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:08 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:08 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:10 +0000 UTC Normal Pod demand-backup-cloud-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-d6fa33a7-6980-42d1-9a1c-a986d9e5d5ac" attachdetach-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:11 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:11 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 207ms (207ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:11 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:11 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:13 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:13 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 192ms (192ms including waiting). Image size: 451034837 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:13 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:13 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:13 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:13 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 157ms (157ms including waiting). Image size: 443507032 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:13 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:13 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:13 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:13 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 208ms (208ms including waiting). Image size: 138708599 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:13 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:14 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:16 +0000 UTC Normal Pod demand-backup-cloud-orc-2 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-orc-2 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-8q2t default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:16 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:17 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 204ms (204ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:17 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:17 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:18 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:19 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 168ms (168ms including waiting). Image size: 73409902 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:19 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:19 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:19 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:19 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 196ms (196ms including waiting). Image size: 73409902 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:19 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:19 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:24 +0000 UTC Warning Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:32 +0000 UTC Warning Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2026/03/17 10:38:31 Waiting for MySQL ready state 2026/03/17 10:38:31 MySQL is ready 2026/03/17 10:38:31 Peers: [3461346537666132.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak 6262613531656337.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak] 2026/03/17 10:38:31 FQDN: demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:38:31 Primary: demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak Replicas: [demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak] 2026/03/17 10:38:31 lookup demand-backup-cloud-mysql-1 [10.53.233.15] 2026/03/17 10:38:31 PodIP: 10.53.233.15 2026/03/17 10:38:31 lookup demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak [10.53.234.11] 2026/03/17 10:38:31 PrimaryIP: 10.53.234.11 2026/03/17 10:38:31 Donor: demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:38:31 Opening connection to 10.53.233.15 2026/03/17 10:38:31 Clone required: true 2026/03/17 10:38:31 Checking if a clone in progress 2026/03/17 10:38:31 Clone in progress: false 2026/03/17 10:38:31 Cloning from demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:38:32 Clone finished. Restarting container... kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:32 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:38:36 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 182ms (182ms including waiting). Image size: 451034837 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:06 +0000 UTC Normal Pod demand-backup-cloud-mysql-2 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-mysql-2 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-68dn default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:09 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-haproxy-1 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-8q2t default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:10 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:10 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 241ms (241ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:10 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:10 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:11 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:11 +0000 UTC Normal Pod demand-backup-cloud-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-c680e1db-c664-4c9f-9511-7294e66e4570" attachdetach-controller
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:12 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 219ms (219ms including waiting). Image size: 103577541 bytes.
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:12 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:12 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:12 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:12 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 200ms (200ms including waiting). Image size: 103577541 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:12 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:12 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:18 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:18 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 204ms (204ms including waiting). Image size: 110731832 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:18 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:18 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:19 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:19 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 155ms (155ms including waiting). Image size: 451034837 bytes. 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:19 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:19 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:19 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:20 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 214ms (214ms including waiting). Image size: 443507032 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:20 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:20 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:20 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:20 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 188ms (188ms including waiting). Image size: 138708599 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:20 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:20 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:29 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-haproxy-2 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-5h0s default-scheduler logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:29 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:29 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 192ms (192ms including waiting). Image size: 110731832 bytes. 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:29 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:29 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:31 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:31 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 199ms (199ms including waiting). Image size: 103577541 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:31 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:31 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:31 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:31 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 164ms (164ms including waiting). Image size: 103577541 bytes. 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:31 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:31 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:38 +0000 UTC Warning Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2026/03/17 10:39:38 Waiting for MySQL ready state 2026/03/17 10:39:38 MySQL is ready 2026/03/17 10:39:38 Peers: [3461346537666132.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak 3534343430366530.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak 6262613531656337.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak] 2026/03/17 10:39:38 FQDN: demand-backup-cloud-mysql-2.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:39:38 Primary: demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak Replicas: [demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak demand-backup-cloud-mysql-2.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak] 2026/03/17 10:39:38 lookup demand-backup-cloud-mysql-2 [10.53.232.18] 2026/03/17 10:39:38 PodIP: 10.53.232.18 2026/03/17 10:39:38 lookup demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak [10.53.234.11] 2026/03/17 10:39:38 PrimaryIP: 10.53.234.11 2026/03/17 10:39:38 Donor: demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:39:38 Opening connection to 10.53.232.18 2026/03/17 10:39:38 Clone required: true 2026/03/17 10:39:38 Checking if a clone in progress 2026/03/17 10:39:38 Clone in progress: false 2026/03/17 10:39:38 Cloning from demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:39:38 Clone finished. Restarting container... kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:38 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:39:42 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 190ms (190ms including waiting). Image size: 451034837 bytes. 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:40:31 +0000 UTC Normal Pod xb-demand-backup-cloud-azure-azure-blob-wfqwt Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/xb-demand-backup-cloud-azure-azure-blob-wfqwt to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-68dn default-scheduler logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:40:31 +0000 UTC Normal Pod xb-demand-backup-cloud-azure-azure-blob-wfqwt.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:40:31 +0000 UTC Normal Job.batch xb-demand-backup-cloud-azure-azure-blob SuccessfulCreate Created pod: xb-demand-backup-cloud-azure-azure-blob-wfqwt job-controller logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:40:32 +0000 UTC Normal Pod xb-demand-backup-cloud-azure-azure-blob-wfqwt.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 155ms (155ms including waiting). Image size: 110731832 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:40:32 +0000 UTC Normal Pod xb-demand-backup-cloud-azure-azure-blob-wfqwt.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:40:32 +0000 UTC Normal Pod xb-demand-backup-cloud-azure-azure-blob-wfqwt.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:40:33 +0000 UTC Normal Pod xb-demand-backup-cloud-azure-azure-blob-wfqwt.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:40:34 +0000 UTC Normal Pod xb-demand-backup-cloud-azure-azure-blob-wfqwt.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 199ms (199ms including waiting). Image size: 443507032 bytes. 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:40:34 +0000 UTC Normal Pod xb-demand-backup-cloud-azure-azure-blob-wfqwt.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:40:34 +0000 UTC Normal Pod xb-demand-backup-cloud-azure-azure-blob-wfqwt.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:40:40 +0000 UTC Normal Job.batch xb-demand-backup-cloud-azure-azure-blob Completed Job completed job-controller logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:02 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:02 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:02 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:02 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:02 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:02 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:02 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:03 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:03 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:03 +0000 UTC Warning Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/03/17 10:41:03 MySQL state is not ready... 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:04 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:04 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:05 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:05 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:06 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:06 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:06 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:06 +0000 UTC Warning Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/03/17 10:41:06 MySQL state is not ready... kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:06 +0000 UTC Warning Pod demand-backup-cloud-orc-1.spec.containers{orchestrator} Unhealthy Readiness probe failed: Get "http://10.53.233.14:3000/api/health": dial tcp 10.53.233.14:3000: connect: connection refused kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:12 +0000 UTC Warning Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/03/17 10:41:12 MySQL state is not ready... 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:17 +0000 UTC Warning Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Unhealthy Readiness probe errored: rpc error: code = Unknown desc = failed to exec in container: container is in CONTAINER_EXITED state kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:35 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:35 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:36 +0000 UTC Warning Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Unhealthy Readiness probe failed: Get "http://10.53.232.16:3000/api/health": dial tcp 10.53.232.16:3000: connect: connection refused kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:43 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-azure-f2w4n Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/xb-restore-demand-backup-cloud-restore-azure-f2w4n to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-68dn default-scheduler logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:43 +0000 UTC Normal Job.batch xb-restore-demand-backup-cloud-restore-azure SuccessfulCreate Created pod: xb-restore-demand-backup-cloud-restore-azure-f2w4n job-controller logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:48 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-azure-f2w4n SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-e8c364a6-add0-4782-be93-0015ade31362" attachdetach-controller logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:50 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-azure-f2w4n.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:51 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-azure-f2w4n.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 174ms (175ms including waiting). Image size: 110731832 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:51 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-azure-f2w4n.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:51 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-azure-f2w4n.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:53 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-azure-f2w4n.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:53 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-azure-f2w4n.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 153ms (153ms including waiting). Image size: 443507032 bytes. 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:53 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-azure-f2w4n.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:41:53 +0000 UTC Normal Pod xb-restore-demand-backup-cloud-restore-azure-f2w4n.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:02 +0000 UTC Normal Pod demand-backup-cloud-mysql-0 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-mysql-0 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-8q2t default-scheduler logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:02 +0000 UTC Warning Pod demand-backup-cloud-mysql-0 FailedAttachVolume Multi-Attach error for volume "pvc-e8c364a6-add0-4782-be93-0015ade31362" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:02 +0000 UTC Normal Pod demand-backup-cloud-orc-0 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-orc-0 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-68dn default-scheduler logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:02 +0000 UTC Normal Job.batch xb-restore-demand-backup-cloud-restore-azure Completed Job completed job-controller logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:03 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:03 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 138ms (138ms including waiting). Image size: 110731832 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:03 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:03 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:05 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:05 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 141ms (141ms including waiting). Image size: 73409902 bytes. 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:05 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:05 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:05 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:05 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 194ms (194ms including waiting). Image size: 73409902 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:05 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:05 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:19 +0000 UTC Normal Pod demand-backup-cloud-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-e8c364a6-add0-4782-be93-0015ade31362" attachdetach-controller logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:20 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:20 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 218ms (218ms including waiting). Image size: 110731832 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:21 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:21 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:22 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:22 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 187ms (187ms including waiting). Image size: 451034837 bytes. 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:22 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:22 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:22 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:22 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 201ms (201ms including waiting). Image size: 443507032 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:22 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:22 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:22 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:23 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 189ms (189ms including waiting). Image size: 138708599 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:23 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:23 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:37 +0000 UTC Normal Pod demand-backup-cloud-orc-1 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-orc-1 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-5h0s default-scheduler logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:37 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:37 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 224ms (224ms including waiting). Image size: 110731832 bytes. 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:37 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:37 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:39 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:39 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 178ms (178ms including waiting). Image size: 73409902 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:39 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:39 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:39 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:40 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 205ms (205ms including waiting). Image size: 73409902 bytes. 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:40 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:40 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:42:56 +0000 UTC Normal Pod demand-backup-cloud-mysql-1 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-mysql-1 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-5h0s default-scheduler logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:01 +0000 UTC Normal Pod demand-backup-cloud-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-d6fa33a7-6980-42d1-9a1c-a986d9e5d5ac" attachdetach-controller logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:02 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-haproxy-0 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-68dn default-scheduler logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:02 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:03 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:03 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 167ms (167ms including waiting). Image size: 110731832 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:03 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:03 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:03 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 208ms (208ms including waiting). Image size: 110731832 bytes. 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:03 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:03 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:04 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:04 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 184ms (184ms including waiting). Image size: 451034837 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:04 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:04 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:04 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:05 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:05 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 182ms (182ms including waiting). Image size: 103577541 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:05 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:05 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:05 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:05 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 173ms (173ms including waiting). Image size: 103577541 bytes. 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:05 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:05 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:05 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 173ms (173ms including waiting). Image size: 443507032 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:05 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:05 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:05 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:05 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 177ms (177ms including waiting). Image size: 138708599 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:05 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:05 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:11 +0000 UTC Normal Pod demand-backup-cloud-orc-2 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-orc-2 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-8q2t default-scheduler logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:12 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:12 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 230ms (230ms including waiting). Image size: 110731832 bytes. 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:12 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:12 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:14 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:14 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 226ms (226ms including waiting). Image size: 73409902 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:14 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:14 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:14 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:14 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 193ms (193ms including waiting). Image size: 73409902 bytes. 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:14 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:14 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:22 +0000 UTC Warning Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:23 +0000 UTC Warning Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2026/03/17 10:43:22 Waiting for MySQL ready state 2026/03/17 10:43:22 MySQL is ready 2026/03/17 10:43:22 Peers: [6439353761623632.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak 6661353231353466.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak] 2026/03/17 10:43:22 FQDN: demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:43:22 Primary: demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak Replicas: [demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak] 2026/03/17 10:43:22 lookup demand-backup-cloud-mysql-1 [10.53.233.18] 2026/03/17 10:43:22 PodIP: 10.53.233.18 2026/03/17 10:43:22 lookup demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak [10.53.234.14] 2026/03/17 10:43:22 PrimaryIP: 10.53.234.14 2026/03/17 10:43:23 Donor: demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:43:23 Opening connection to 10.53.233.18 2026/03/17 10:43:23 Clone required: true 2026/03/17 10:43:23 Checking if a clone in progress 2026/03/17 10:43:23 Clone in progress: false 2026/03/17 10:43:23 Cloning from demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak 2026/03/17 10:43:23 Clone finished. Restarting container... kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:24 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:27 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 195ms (195ms including waiting). Image size: 451034837 bytes. 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:43:58 +0000 UTC Normal Pod demand-backup-cloud-mysql-2 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-mysql-2 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-68dn default-scheduler logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:05 +0000 UTC Normal Pod demand-backup-cloud-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-c680e1db-c664-4c9f-9511-7294e66e4570" attachdetach-controller logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:07 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-haproxy-1 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-8q2t default-scheduler logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:07 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:07 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:07 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 231ms (231ms including waiting). Image size: 110731832 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:07 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:07 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:08 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 243ms (243ms including waiting). Image size: 110731832 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:08 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:08 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:09 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:09 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 237ms (237ms including waiting). Image size: 103577541 bytes. 
kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:09 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:09 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:09 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:09 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 181ms (181ms including waiting). Image size: 103577541 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:09 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 170ms (170ms including waiting). Image size: 451034837 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 180ms (180ms including waiting). Image size: 443507032 bytes. kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 179ms (179ms including waiting). Image size: 138708599 bytes. 
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:09 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:10 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:10 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:26 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-optimal-kodiak/demand-backup-cloud-haproxy-2 to gke-jen-ps-1236-862a05f9-default-pool-76c9cda8-5h0s default-scheduler
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:27 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:27 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1236-862a05f9" in 246ms (246ms including waiting). Image size: 110731832 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:27 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:27 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:27 +0000 UTC Warning Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed:
    2026/03/17 10:44:27 Waiting for MySQL ready state
    2026/03/17 10:44:27 MySQL is ready
    2026/03/17 10:44:27 Peers: [6439353761623632.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak 6533326630373465.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak 6661353231353466.demand-backup-cloud-mysql-unready.kuttl-test-optimal-kodiak]
    2026/03/17 10:44:27 FQDN: demand-backup-cloud-mysql-2.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak
    2026/03/17 10:44:27 Primary: demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak Replicas: [demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak demand-backup-cloud-mysql-2.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak]
    2026/03/17 10:44:27 lookup demand-backup-cloud-mysql-2 [10.53.232.23]
    2026/03/17 10:44:27 PodIP: 10.53.232.23
    2026/03/17 10:44:27 lookup demand-backup-cloud-mysql-0.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak [10.53.234.14]
    2026/03/17 10:44:27 PrimaryIP: 10.53.234.14
    2026/03/17 10:44:27 Donor: demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak
    2026/03/17 10:44:27 Opening connection to 10.53.232.23
    2026/03/17 10:44:27 Clone required: true
    2026/03/17 10:44:27 Checking if a clone in progress
    2026/03/17 10:44:27 Clone in progress: false
    2026/03/17 10:44:27 Cloning from demand-backup-cloud-mysql-1.demand-backup-cloud-mysql.kuttl-test-optimal-kodiak
    2026/03/17 10:44:27 Clone finished. Restarting container... kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:27 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:28 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:29 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 200ms (200ms including waiting). Image size: 103577541 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:29 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:29 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:29 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:29 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 202ms (202ms including waiting). Image size: 103577541 bytes. kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:29 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:29 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:44:30 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 183ms (183ms including waiting). Image size: 451034837 bytes. kubelet
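Note on the Unhealthy/Killing pair above: this is the normal bootstrap path for a joining replica, not a failure. The startup probe reports the clone from the donor (demand-backup-cloud-mysql-1), the kubelet restarts the mysql container as the probe output announces, and the image is pulled again on the way back up. A minimal sketch for digging into such a restart while the namespace still exists (namespace and pod names taken from this run; the clone progress lines live in the previous container instance):

    # Check restart count and last state of the mysql container,
    # then read the pre-restart log where the clone progress was printed.
    kubectl -n kuttl-test-optimal-kodiak describe pod demand-backup-cloud-mysql-2
    kubectl -n kuttl-test-optimal-kodiak logs demand-backup-cloud-mysql-2 -c mysql --previous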
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:25 +0000 UTC Warning PodDisruptionBudget.policy demand-backup-cloud-haproxy CalculateExpectedPodCountFailed Failed to calculate the number of expected pods: found no controllers for pod "demand-backup-cloud-haproxy-0" controllermanager
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:25 +0000 UTC Warning PodDisruptionBudget.policy demand-backup-cloud-haproxy CalculateExpectedPodCountFailed Failed to calculate the number of expected pods: found no controllers for pod "demand-backup-cloud-haproxy-1" controllermanager
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Normal Pod demand-backup-cloud-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Normal Pod demand-backup-cloud-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Warning Pod demand-backup-cloud-haproxy-1.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Normal Pod demand-backup-cloud-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Warning Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/03/17 10:45:26 MySQL state is not ready... kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-1.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Normal Pod demand-backup-cloud-mysql-2.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Normal Pod demand-backup-cloud-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Normal Pod demand-backup-cloud-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:26 +0000 UTC Normal Pod demand-backup-cloud-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:27 +0000 UTC Warning Pod demand-backup-cloud-mysql-1.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/03/17 10:45:27 MySQL state is not ready... kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:27 +0000 UTC Warning Pod demand-backup-cloud-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/03/17 10:45:27 MySQL state is not ready... kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:31 +0000 UTC Warning Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/03/17 10:45:31 MySQL state is not ready... kubelet
logger.go:42: 10:45:40 | demand-backup-cloud | 2026-03-17 10:45:36 +0000 UTC Warning Pod demand-backup-cloud-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/03/17 10:45:36 MySQL state is not ready... kubelet
logger.go:42: 10:45:41 | demand-backup-cloud | Deleting namespace "kuttl-test-optimal-kodiak"
=== NAME kuttl
    harness.go:404: run tests finished
    harness.go:511: cleaning up
    harness.go:568: removing temp folder: ""
--- PASS: kuttl (1349.72s)
    --- PASS: kuttl/harness (0.00s)
        --- PASS: kuttl/harness/demand-backup-cloud (1348.90s)
PASS
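To reproduce a run like this against an existing cluster, kuttl can be pointed at a single test from the suite. A sketch, assuming the repository's kuttl config lives at e2e-tests/kuttl.yaml (path hypothetical; adjust to the actual repo layout):

    # Run only demand-backup-cloud with the same 180s per-step timeout
    # reported by the harness at the top of this log.
    kubectl kuttl test --config e2e-tests/kuttl.yaml --test demand-backup-cloud --timeout 180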