=== RUN kuttl
harness.go:460: starting setup
harness.go:258: running tests using configured kubeconfig.
harness.go:281: Successful connection to cluster at: https://34.31.146.25
harness.go:366: running tests
harness.go:77: going to run test suite with timeout of 180 seconds for each step
harness.go:378: testsuite: e2e-tests/tests has 50 tests
=== RUN kuttl/harness
=== RUN kuttl/harness/gr-demand-backup-haproxy
=== PAUSE kuttl/harness/gr-demand-backup-haproxy
=== CONT kuttl/harness/gr-demand-backup-haproxy
logger.go:42: 09:28:58 | gr-demand-backup-haproxy | Creating namespace "kuttl-test-native-chow"
logger.go:42: 09:28:59 | gr-demand-backup-haproxy/0-minio-secret | starting test step 0-minio-secret
logger.go:42: 09:28:59 | gr-demand-backup-haproxy/0-minio-secret | Secret:kuttl-test-native-chow/minio-secret created
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/0-minio-secret | test step completed 0-minio-secret
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | starting test step 1-deploy-operator
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
init_temp_dir # do this only in the first TestStep
deploy_operator
deploy_tls_cluster_secrets
deploy_client
deploy_minio]
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | + source ../../functions
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ realpath ../../..
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | ++++ pwd
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | ++ test_name=gr-demand-backup-haproxy
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/vars.sh
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/deploy
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/deploy
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/conf
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/conf
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy/conf
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy/conf
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup-haproxy
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup-haproxy
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export GIT_BRANCH=PR-1292
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ GIT_BRANCH=PR-1292
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export VERSION=PR-1292-7efcb1c5
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ VERSION=PR-1292-7efcb1c5
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ [[ -z 8.4 ]]
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export MYSQL_VERSION=8.4
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ MYSQL_VERSION=8.4
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export CERT_MANAGER_VER=1.19.1
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ CERT_MANAGER_VER=1.19.1
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export MINIO_VER=5.4.0
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ MINIO_VER=5.4.0
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export VAULT_VER=0.16.1
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ VAULT_VER=0.16.1
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | ++++ which gdate
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | ++++ which date
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ export date=/usr/sbin/date
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ date=/usr/sbin/date
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ oc get projects
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ :
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ kubectl get nodes
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ grep '^minikube'
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ which gsed
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | +++ which sed
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | ++ sed=/usr/sbin/sed
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | + init_temp_dir
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | + rm -rf /tmp/kuttl/ps/gr-demand-backup-haproxy
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | + mkdir -p /tmp/kuttl/ps/gr-demand-backup-haproxy
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | + deploy_operator
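deploy_operator begins by tearing down any leftovers from a previous run. The helper bodies live in e2e-tests/functions and are not printed in the log; judging from the trace that follows, the cleanup swallows NotFound errors so that set -o errexit does not abort the step. A minimal sketch of that pattern (the OPERATOR_NS variable is an assumption inferred from the [[ -n ps-operator ]] checks below):

destroy_operator() {
    # force-delete an operator deployment left over from a previous run;
    # '|| true' keeps 'set -o errexit' alive when the resource is absent,
    # which is what produces the "+ true" lines after each NotFound error
    kubectl -n "${OPERATOR_NS}" delete deployment percona-server-mysql-operator \
        --force --grace-period=0 || true
    if [[ -n ${OPERATOR_NS} ]]; then
        kubectl delete namespace "${OPERATOR_NS}" --force --grace-period=0 || true
    fi
}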
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | + destroy_operator
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
logger.go:42: 09:29:00 | gr-demand-backup-haproxy/1-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 09:29:01 | gr-demand-backup-haproxy/1-deploy-operator | Error from server (NotFound): deployments.apps "percona-server-mysql-operator" not found
logger.go:42: 09:29:01 | gr-demand-backup-haproxy/1-deploy-operator | + true
logger.go:42: 09:29:01 | gr-demand-backup-haproxy/1-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 09:29:01 | gr-demand-backup-haproxy/1-deploy-operator | + kubectl delete namespace ps-operator --force --grace-period=0
logger.go:42: 09:29:01 | gr-demand-backup-haproxy/1-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 09:29:01 | gr-demand-backup-haproxy/1-deploy-operator | Error from server (NotFound): namespaces "ps-operator" not found
logger.go:42: 09:29:01 | gr-demand-backup-haproxy/1-deploy-operator | + true
logger.go:42: 09:29:01 | gr-demand-backup-haproxy/1-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 09:29:01 | gr-demand-backup-haproxy/1-deploy-operator | + create_namespace ps-operator
logger.go:42: 09:29:01 | gr-demand-backup-haproxy/1-deploy-operator | + local namespace=ps-operator
logger.go:42: 09:29:01 | gr-demand-backup-haproxy/1-deploy-operator | + [[ -n '' ]]
logger.go:42: 09:29:01 | gr-demand-backup-haproxy/1-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found
logger.go:42: 09:29:02 | gr-demand-backup-haproxy/1-deploy-operator | + kubectl wait --for=delete namespace ps-operator
logger.go:42: 09:29:02 | gr-demand-backup-haproxy/1-deploy-operator | + kubectl create namespace ps-operator
logger.go:42: 09:29:03 | gr-demand-backup-haproxy/1-deploy-operator | namespace/ps-operator created
logger.go:42: 09:29:03 | gr-demand-backup-haproxy/1-deploy-operator | + apply_crd
logger.go:42: 09:29:03 | gr-demand-backup-haproxy/1-deploy-operator | + kubectl -n ps-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/deploy/crd.yaml
logger.go:42: 09:29:04 | gr-demand-backup-haproxy/1-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
logger.go:42: 09:29:04 | gr-demand-backup-haproxy/1-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
logger.go:42: 09:29:05 | gr-demand-backup-haproxy/1-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
logger.go:42: 09:29:05 | gr-demand-backup-haproxy/1-deploy-operator | + apply_rbac
logger.go:42: 09:29:05 | gr-demand-backup-haproxy/1-deploy-operator | + local rbac_file
logger.go:42: 09:29:05 | gr-demand-backup-haproxy/1-deploy-operator | + '[' -n ps-operator ']'
logger.go:42: 09:29:05 | gr-demand-backup-haproxy/1-deploy-operator | + rbac_file=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/deploy/cw-rbac.yaml
logger.go:42: 09:29:05 | gr-demand-backup-haproxy/1-deploy-operator | + kubectl -n ps-operator apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/deploy/cw-rbac.yaml
logger.go:42: 09:29:06 | gr-demand-backup-haproxy/1-deploy-operator | serviceaccount/percona-server-mysql-operator created
logger.go:42: 09:29:07 | gr-demand-backup-haproxy/1-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 09:29:07 | gr-demand-backup-haproxy/1-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 09:29:07 | gr-demand-backup-haproxy/1-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 09:29:08 | gr-demand-backup-haproxy/1-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 09:29:08 | gr-demand-backup-haproxy/1-deploy-operator | + local operator_file
logger.go:42: 09:29:08 | gr-demand-backup-haproxy/1-deploy-operator | + '[' -n ps-operator ']'
logger.go:42: 09:29:08 | gr-demand-backup-haproxy/1-deploy-operator | + operator_file=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/deploy/cw-operator.yaml
logger.go:42: 09:29:08 | gr-demand-backup-haproxy/1-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"'
logger.go:42: 09:29:08 | gr-demand-backup-haproxy/1-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "VERBOSE"'
logger.go:42: 09:29:08 | gr-demand-backup-haproxy/1-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5
logger.go:42: 09:29:08 | gr-demand-backup-haproxy/1-deploy-operator | + kubectl -n ps-operator apply -f -
logger.go:42: 09:29:08 | gr-demand-backup-haproxy/1-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5"' /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/deploy/cw-operator.yaml
logger.go:42: 09:29:09 | gr-demand-backup-haproxy/1-deploy-operator | configmap/percona-server-mysql-operator-config created
logger.go:42: 09:29:09 | gr-demand-backup-haproxy/1-deploy-operator | deployment.apps/percona-server-mysql-operator created
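That is the whole operator install. Condensed into one pipeline, with the paths and image tag taken from the trace above (the telemetry/log-level yq edits are elided here for brevity), it is roughly:

# server-side apply keeps repeated CRD installs idempotent across test runs
kubectl -n ps-operator apply --server-side --force-conflicts -f "${DEPLOY_DIR}/crd.yaml"
kubectl -n ps-operator apply -f "${DEPLOY_DIR}/cw-rbac.yaml"
# pin the manager image built for this PR before applying the deployment
yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5"' \
    "${DEPLOY_DIR}/cw-operator.yaml" | kubectl -n ps-operator apply -f -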
logger.go:42: 09:29:09 | gr-demand-backup-haproxy/1-deploy-operator | + deploy_tls_cluster_secrets
logger.go:42: 09:29:09 | gr-demand-backup-haproxy/1-deploy-operator | + kubectl -n kuttl-test-native-chow apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/conf/ssl-secret.yaml
logger.go:42: 09:29:10 | gr-demand-backup-haproxy/1-deploy-operator | secret/test-ssl created
logger.go:42: 09:29:10 | gr-demand-backup-haproxy/1-deploy-operator | + deploy_client
logger.go:42: 09:29:10 | gr-demand-backup-haproxy/1-deploy-operator | + kubectl -n kuttl-test-native-chow apply -f -
logger.go:42: 09:29:10 | gr-demand-backup-haproxy/1-deploy-operator | ++ printf '.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:main-psmysql8.4
logger.go:42: 09:29:10 | gr-demand-backup-haproxy/1-deploy-operator | + yq eval '.spec.containers[0].image="perconalab/percona-server-mysql-operator:main-psmysql8.4"' /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/conf/client.yaml
logger.go:42: 09:29:12 | gr-demand-backup-haproxy/1-deploy-operator | pod/mysql-client created
logger.go:42: 09:29:12 | gr-demand-backup-haproxy/1-deploy-operator | + deploy_minio
logger.go:42: 09:29:12 | gr-demand-backup-haproxy/1-deploy-operator | + local storage=2G
logger.go:42: 09:29:12 | gr-demand-backup-haproxy/1-deploy-operator | + local access_key
logger.go:42: 09:29:12 | gr-demand-backup-haproxy/1-deploy-operator | + local secret_key
logger.go:42: 09:29:12 | gr-demand-backup-haproxy/1-deploy-operator | ++ kubectl -n kuttl-test-native-chow get secret minio-secret -o 'jsonpath={.data.AWS_ACCESS_KEY_ID}'
logger.go:42: 09:29:12 | gr-demand-backup-haproxy/1-deploy-operator | ++ base64 -d
logger.go:42: 09:29:12 | gr-demand-backup-haproxy/1-deploy-operator | + access_key='some-access$\n"-key'
logger.go:42: 09:29:12 | gr-demand-backup-haproxy/1-deploy-operator | ++ kubectl -n kuttl-test-native-chow get secret minio-secret -o 'jsonpath={.data.AWS_SECRET_ACCESS_KEY}'
logger.go:42: 09:29:12 | gr-demand-backup-haproxy/1-deploy-operator | ++ base64 -d
logger.go:42: 09:29:13 | gr-demand-backup-haproxy/1-deploy-operator | + secret_key='some-$\n"secret-key'
logger.go:42: 09:29:13 | gr-demand-backup-haproxy/1-deploy-operator | + helm uninstall -n kuttl-test-native-chow minio-service
logger.go:42: 09:29:13 | gr-demand-backup-haproxy/1-deploy-operator | Error: uninstall: Release not loaded: minio-service: release: not found
logger.go:42: 09:29:13 | gr-demand-backup-haproxy/1-deploy-operator | + :
logger.go:42: 09:29:13 | gr-demand-backup-haproxy/1-deploy-operator | + helm repo remove minio
logger.go:42: 09:29:13 | gr-demand-backup-haproxy/1-deploy-operator | Error: no repositories configured
logger.go:42: 09:29:13 | gr-demand-backup-haproxy/1-deploy-operator | + :
logger.go:42: 09:29:13 | gr-demand-backup-haproxy/1-deploy-operator | + helm repo add minio https://charts.min.io/
logger.go:42: 09:29:13 | gr-demand-backup-haproxy/1-deploy-operator | "minio" has been added to your repositories
logger.go:42: 09:29:13 | gr-demand-backup-haproxy/1-deploy-operator | +++ printf %q 'some-access$\n"-key'
logger.go:42: 09:29:13 | gr-demand-backup-haproxy/1-deploy-operator | ++ printf %q 'some-access\$\\n\"-key'
logger.go:42: 09:29:13 | gr-demand-backup-haproxy/1-deploy-operator | +++ printf %q 'some-$\n"secret-key'
logger.go:42: 09:29:13 | gr-demand-backup-haproxy/1-deploy-operator | ++ printf %q 'some-\$\\n\"secret-key'
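The helm install below is wrapped in a retry helper. Only its entry trace (max, delay, shift 2, n=1) is visible; a plausible reconstruction consistent with that trace, where the sleep-and-give-up branch is an assumption rather than the actual function body:

retry() {
    local max=$1
    local delay=$2
    shift 2    # the remaining arguments are the command to retry
    local n=1
    until "$@"; do
        if [[ $n -ge $max ]]; then
            echo "retry: giving up after ${max} attempts" >&2
            return 1
        fi
        sleep "${delay}"
        n=$((n + 1))
    done
}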
logger.go:42: 09:29:13 | gr-demand-backup-haproxy/1-deploy-operator | + retry 10 60 helm install minio-service -n kuttl-test-native-chow --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access\\\$\\\\n\\\"-key' --set 'users[0].secretKey=some-\\\$\\\\n\\\"secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio
logger.go:42: 09:29:13 | gr-demand-backup-haproxy/1-deploy-operator | + local max=10
logger.go:42: 09:29:13 | gr-demand-backup-haproxy/1-deploy-operator | + local delay=60
logger.go:42: 09:29:13 | gr-demand-backup-haproxy/1-deploy-operator | + shift 2
logger.go:42: 09:29:13 | gr-demand-backup-haproxy/1-deploy-operator | + local n=1
logger.go:42: 09:29:13 | gr-demand-backup-haproxy/1-deploy-operator | + helm install minio-service -n kuttl-test-native-chow --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access\\\$\\\\n\\\"-key' --set 'users[0].secretKey=some-\\\$\\\\n\\\"secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | NAME: minio-service
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | LAST DEPLOYED: Wed Apr 15 09:29:15 2026
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | NAMESPACE: kuttl-test-native-chow
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | STATUS: deployed
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | REVISION: 1
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | TEST SUITE: None
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | NOTES:
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | MinIO can be accessed via port 9000 on the following DNS name from within your cluster:
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | minio-service.kuttl-test-native-chow.cluster.local
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator |
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | To access MinIO from localhost, run the below commands:
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator |
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | 1. export POD_NAME=$(kubectl get pods --namespace kuttl-test-native-chow -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}")
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator |
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | 2. kubectl port-forward $POD_NAME 9000 --namespace kuttl-test-native-chow
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator |
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator |
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client:
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator |
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator |
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace kuttl-test-native-chow minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace kuttl-test-native-chow minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator |
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | 3. mc ls minio-service-local
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | ++ kubectl -n kuttl-test-native-chow get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | + MINIO_POD=minio-service-7b949cd57-l2mz8
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | + wait_pod minio-service-7b949cd57-l2mz8
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | + local pod=minio-service-7b949cd57-l2mz8
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | + local ns=kuttl-test-native-chow
logger.go:42: 09:29:42 | gr-demand-backup-haproxy/1-deploy-operator | + set +o xtrace
logger.go:42: 09:29:43 | gr-demand-backup-haproxy/1-deploy-operator | minio-service-7b949cd57-l2mz8true
logger.go:42: 09:29:43 | gr-demand-backup-haproxy/1-deploy-operator | + kubectl -n kuttl-test-native-chow run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID='\''some-access$\n"-key'\'' AWS_SECRET_ACCESS_KEY='\''some-$\n"secret-key'\'' AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'
logger.go:42: 09:29:47 | gr-demand-backup-haproxy/1-deploy-operator | All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt.
logger.go:42: 09:29:47 | gr-demand-backup-haproxy/1-deploy-operator | If you don't see a command prompt, try pressing enter.
logger.go:42: 09:29:50 | gr-demand-backup-haproxy/1-deploy-operator | pod "aws-cli" deleted from kuttl-test-native-chow namespace
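wait_pod, used above for the minio pod, hides its loop behind set +o xtrace; the only evidence of what it does is the fused "minio-service-7b949cd57-l2mz8true" line, which looks like the pod name immediately followed by a readiness jsonpath result. A guess at a minimal equivalent (the polling loop and jsonpath are assumptions, not the actual function body):

wait_pod() {
    local pod=$1
    local ns=${NAMESPACE}   # assumed; the trace shows ns=kuttl-test-native-chow
    set +o xtrace
    echo -n "${pod}"
    # poll until the first container reports ready, then print "true",
    # producing the fused "<pod>true" line seen in the log
    until [[ $(kubectl -n "${ns}" get pod "${pod}" \
        -o 'jsonpath={.status.containerStatuses[0].ready}') == true ]]; do
        sleep 1
    done
    echo true
    set -o xtrace
}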
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/1-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/1-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/1-deploy-operator | INFO Found 1 resource(s).
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/1-deploy-operator | NAME                           NAMESPACE    COL0
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/1-deploy-operator | percona-server-mysql-operator  ps-operator  1
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/1-deploy-operator | ASSERT PASS
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/1-deploy-operator | test step completed 1-deploy-operator
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | starting test step 2-create-cluster
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
get_cr \
| yq eval '.spec.backup.storages.minio.type="s3"' - \
| yq eval '.spec.backup.storages.minio.s3.bucket="operator-testing"' - \
| yq eval '.spec.backup.storages.minio.s3.credentialsSecret="minio-secret"' - \
| yq eval ".spec.backup.storages.minio.s3.endpointUrl=\"http://minio-service.${NAMESPACE}:9000\"" - \
| yq eval '.spec.backup.storages.minio.s3.region="us-east-1"' - \
| yq eval '.spec.mysql.clusterType="group-replication"' - \
| yq eval ".spec.proxy.router.enabled=false" - \
| yq eval ".spec.proxy.haproxy.enabled=true" - \
| kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | + source ../../functions
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ realpath ../../..
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | ++++ pwd
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | ++ test_name=gr-demand-backup-haproxy
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/vars.sh
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/deploy
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/deploy
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/conf
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/conf
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy/conf
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy/conf
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup-haproxy
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup-haproxy
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export GIT_BRANCH=PR-1292
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ GIT_BRANCH=PR-1292
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export VERSION=PR-1292-7efcb1c5
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ VERSION=PR-1292-7efcb1c5
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ [[ -z 8.4 ]]
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export MYSQL_VERSION=8.4
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ MYSQL_VERSION=8.4
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export CERT_MANAGER_VER=1.19.1
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ CERT_MANAGER_VER=1.19.1
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export MINIO_VER=5.4.0
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ MINIO_VER=5.4.0
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export VAULT_VER=0.16.1
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ VAULT_VER=0.16.1
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | ++++ which gdate
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | ++++ which date
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ export date=/usr/sbin/date
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ date=/usr/sbin/date
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ oc get projects
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ :
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ kubectl get nodes
logger.go:42: 09:29:51 | gr-demand-backup-haproxy/2-create-cluster | +++ grep '^minikube'
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | +++ which gsed
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | +++ which sed
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | ++ sed=/usr/sbin/sed
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | + get_cr
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | + yq eval '.spec.backup.storages.minio.type="s3"' -
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | + local name_suffix=
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | + local image_mysql=perconalab/percona-server-mysql-operator:main-psmysql8.4
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | + local image_backup=perconalab/percona-server-mysql-operator:main-backup8.4
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | + local image_orchestrator=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | + local image_router=perconalab/percona-server-mysql-operator:main-router8.4
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | + local image_toolkit=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | + local image_haproxy=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | + local image_pmm_client=perconalab/pmm-client:3-dev-latest
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | + local cr_file=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/deploy/cr.yaml
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | + yq eval '.spec.backup.storages.minio.s3.bucket="operator-testing"' -
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | + yq eval '.spec.backup.storages.minio.s3.credentialsSecret="minio-secret"' -
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | ++ detect_k8s_platform
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | + yq eval '.spec.backup.storages.minio.s3.endpointUrl="http://minio-service.kuttl-test-native-chow:9000"' -
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | ++ set +x
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | + yq eval '.spec.backup.storages.minio.s3.region="us-east-1"' -
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | + yq eval '.spec.mysql.clusterType="group-replication"' -
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | + yq eval .spec.proxy.router.enabled=false -
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | + kubectl -n kuttl-test-native-chow apply -f -
logger.go:42: 09:29:52 | gr-demand-backup-haproxy/2-create-cluster | + yq eval .spec.proxy.haproxy.enabled=true -
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | ++ echo gke
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | + local platform=gke
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | + local cr_name=gr-demand-backup-haproxy
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | + crs=('/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/deploy/cr.yaml')
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | + local crs
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | ++ get_test_cr gr-demand-backup-haproxy
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | ++ local cr_name=gr-demand-backup-haproxy
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | +++ detect_k8s_platform
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | +++ set +x
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | +++ echo gke
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | ++ local platform=gke
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | +++ get_platform_alias gke
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | +++ set +x
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | +++ echo gke
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | ++ platform_alias=gke
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | +++ get_storage_alias gke
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | +++ set +x
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | +++ echo default
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | ++ storage_alias=default
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | ++ local default_cr=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy/conf/gr-demand-backup-haproxy.yaml
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | ++ local platform_cr=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy/conf/gr-demand-backup-haproxy-gke.yaml
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | ++ local storage_cr=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy/conf/gr-demand-backup-haproxy-default.yaml
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | ++ [[ -n /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy/conf/gr-demand-backup-haproxy-default.yaml ]]
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | ++ [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy/conf/gr-demand-backup-haproxy-default.yaml ]]
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | ++ [[ -n /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy/conf/gr-demand-backup-haproxy-gke.yaml ]]
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | ++ [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy/conf/gr-demand-backup-haproxy-gke.yaml ]]
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | ++ [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy/conf/gr-demand-backup-haproxy.yaml ]]
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | + local test_cr=
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | + [[ -f '' ]]
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | + yq eval-all $'\n\t\tselect(fileIndex == 0) as $base |\n\t\tselect(fileIndex == 1) as $test |\n\t\t($base * ($test // {})) |\n\t\t.spec.backup.storages = ($test.spec.backup.storages // $base.spec.backup.storages // {}) |\n\t\t.spec.mysql.clusterType = ($test.spec.mysql.clusterType // "async") |\n\t\t.metadata.name = "gr-demand-backup-haproxy" |\n\t\t.spec.initContainer.image = "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" |\n\t\tdel(.spec.secretsName) |\n\t\t.spec.sslSecretName = "test-ssl" |\n\t\t.spec.upgradeOptions.apply = "disabled" |\n\t\t.spec.mysql.gracePeriod = 30 |\n\t\t.spec.orchestrator.enabled = true |\n\t\t.spec.mysql.image = "perconalab/percona-server-mysql-operator:main-psmysql8.4" |\n\t\t.spec.backup.image = "perconalab/percona-server-mysql-operator:main-backup8.4" |\n\t\t.spec.orchestrator.image = "perconalab/percona-server-mysql-operator:main-orchestrator" |\n\t\t.spec.proxy.router.image = "perconalab/percona-server-mysql-operator:main-router8.4" |\n\t\t.spec.toolkit.image = "perconalab/percona-server-mysql-operator:main-toolkit" |\n\t\t.spec.proxy.haproxy.image = "perconalab/percona-server-mysql-operator:main-haproxy" |\n\t\t.spec.pmm.image = "perconalab/pmm-client:3-dev-latest" |\n\t\t(.. | select(tag == "!!str")) |= sub(""; "kuttl-test-native-chow")\n\t' /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/deploy/cr.yaml
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | + [[ gke == minikube ]]
logger.go:42: 09:29:53 | gr-demand-backup-haproxy/2-create-cluster | + cat
logger.go:42: 09:29:55 | gr-demand-backup-haproxy/2-create-cluster | perconaservermysql.ps.percona.com/gr-demand-backup-haproxy created
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/2-create-cluster | test step completed 2-create-cluster
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | starting test step 3-write-data
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | running command: [sh -c set -o errexit
set -o pipefail
set -o xtrace
source ../../functions
run_mysql \
"CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" \
"-h $(get_haproxy_svc $(get_cluster_name))"
run_mysql \
"INSERT myDB.myTable (id) VALUES (100500)" \
"-h $(get_haproxy_svc $(get_cluster_name))"]
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | + source ../../functions
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ realpath ../../..
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | ++++ pwd
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | ++ test_name=gr-demand-backup-haproxy
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/vars.sh
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/deploy
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/deploy
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/conf
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/conf
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy/conf
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy/conf
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup-haproxy
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup-haproxy
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export GIT_BRANCH=PR-1292
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ GIT_BRANCH=PR-1292
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export VERSION=PR-1292-7efcb1c5
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ VERSION=PR-1292-7efcb1c5
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ [[ -z 8.4 ]]
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export MYSQL_VERSION=8.4
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ MYSQL_VERSION=8.4
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export CERT_MANAGER_VER=1.19.1
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ CERT_MANAGER_VER=1.19.1
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export MINIO_VER=5.4.0
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ MINIO_VER=5.4.0
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export VAULT_VER=0.16.1
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ VAULT_VER=0.16.1
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | ++++ which gdate
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | ++++ which date
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ export date=/usr/sbin/date
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ date=/usr/sbin/date
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ oc get projects
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ :
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ kubectl get nodes
logger.go:42: 09:32:23 | gr-demand-backup-haproxy/3-write-data | +++ grep '^minikube'
logger.go:42: 09:32:24 | gr-demand-backup-haproxy/3-write-data | +++ which gsed
logger.go:42: 09:32:24 | gr-demand-backup-haproxy/3-write-data | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 09:32:24 | gr-demand-backup-haproxy/3-write-data | +++ which sed
logger.go:42: 09:32:24 | gr-demand-backup-haproxy/3-write-data | ++ sed=/usr/sbin/sed
logger.go:42: 09:32:24 | gr-demand-backup-haproxy/3-write-data | +++ get_cluster_name
logger.go:42: 09:32:24 | gr-demand-backup-haproxy/3-write-data | +++ kubectl -n kuttl-test-native-chow get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 09:32:24 | gr-demand-backup-haproxy/3-write-data | ++ get_haproxy_svc gr-demand-backup-haproxy
logger.go:42: 09:32:24 | gr-demand-backup-haproxy/3-write-data | ++ local cluster=gr-demand-backup-haproxy
logger.go:42: 09:32:24 | gr-demand-backup-haproxy/3-write-data | ++ echo gr-demand-backup-haproxy-haproxy
logger.go:42: 09:32:24 | gr-demand-backup-haproxy/3-write-data | + run_mysql 'CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' '-h gr-demand-backup-haproxy-haproxy'
logger.go:42: 09:32:24 | gr-demand-backup-haproxy/3-write-data | + local 'command=CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)'
logger.go:42: 09:32:24 | gr-demand-backup-haproxy/3-write-data | + local 'host=-h gr-demand-backup-haproxy-haproxy'
logger.go:42: 09:32:24 | gr-demand-backup-haproxy/3-write-data | ++ get_user_pass root
logger.go:42: 09:32:24 | gr-demand-backup-haproxy/3-write-data | ++ local user=root
logger.go:42: 09:32:24 | gr-demand-backup-haproxy/3-write-data | +++ get_cluster_name
logger.go:42: 09:32:24 | gr-demand-backup-haproxy/3-write-data | +++ kubectl -n kuttl-test-native-chow get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 09:32:25 | gr-demand-backup-haproxy/3-write-data | ++ local secret=gr-demand-backup-haproxy-secrets
logger.go:42: 09:32:25 | gr-demand-backup-haproxy/3-write-data | ++ kubectl -n kuttl-test-native-chow get secret gr-demand-backup-haproxy-secrets -o 'jsonpath={.data.root}'
logger.go:42: 09:32:25 | gr-demand-backup-haproxy/3-write-data | ++ base64 --decode
logger.go:42: 09:32:25 | gr-demand-backup-haproxy/3-write-data | + local 'user=-uroot -p'\'',(GmHs-yc,2-OKHhV,J'\'''
logger.go:42: 09:32:25 | gr-demand-backup-haproxy/3-write-data | + local pod=
logger.go:42: 09:32:25 | gr-demand-backup-haproxy/3-write-data | ++ get_client_pod
logger.go:42: 09:32:25 | gr-demand-backup-haproxy/3-write-data | ++ kubectl -n kuttl-test-native-chow get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 09:32:26 | gr-demand-backup-haproxy/3-write-data | + client_pod=mysql-client
logger.go:42: 09:32:26 | gr-demand-backup-haproxy/3-write-data | + wait_pod mysql-client
logger.go:42: 09:32:26 | gr-demand-backup-haproxy/3-write-data | + local pod=mysql-client
logger.go:42: 09:32:26 | gr-demand-backup-haproxy/3-write-data | + local ns=kuttl-test-native-chow
logger.go:42: 09:32:26 | gr-demand-backup-haproxy/3-write-data | + set +o xtrace
logger.go:42: 09:32:26 | gr-demand-backup-haproxy/3-write-data | mysql-clienttrue
logger.go:42: 09:32:26 | gr-demand-backup-haproxy/3-write-data | + kubectl -n kuttl-test-native-chow exec mysql-client -- bash -c 'printf '\''%s\n'\'' "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" | mysql -sN -h gr-demand-backup-haproxy-haproxy -uroot -p'\'',(GmHs-yc,2-OKHhV,J'\'''
logger.go:42: 09:32:26 | gr-demand-backup-haproxy/3-write-data | + /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 09:32:26 | gr-demand-backup-haproxy/3-write-data | + grep -v 'Using a password on the command line interface can be insecure.'
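Pieced together from the expansions above, run_mysql amounts to: fetch the root password from the cluster secret, find the mysql-client pod, and pipe the statement into mysql inside it, filtering the password warning. A condensed sketch under those assumptions (the real helper in e2e-tests/functions also takes an optional user argument and may differ in detail):

run_mysql() {
    local command=$1
    local host=$2
    local user
    user="-uroot -p$(kubectl -n "${NAMESPACE}" get secret "$(get_cluster_name)-secrets" \
        -o 'jsonpath={.data.root}' | base64 --decode)"
    local pod
    pod=$(kubectl -n "${NAMESPACE}" get pods --selector=name=mysql-client \
        -o 'jsonpath={.items[].metadata.name}')
    # the trailing grep exits non-zero when a statement produces no rows,
    # which the test absorbs (the "+ :" lines in the trace)
    kubectl -n "${NAMESPACE}" exec "${pod}" -- \
        bash -c "printf '%s\n' \"${command}\" | mysql -sN ${host} ${user}" \
        | sed -e 's/mysql: //' \
        | grep -v 'Using a password on the command line interface can be insecure.'
}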
logger.go:42: 09:32:27 | gr-demand-backup-haproxy/3-write-data | + :
logger.go:42: 09:32:27 | gr-demand-backup-haproxy/3-write-data | +++ get_cluster_name
logger.go:42: 09:32:27 | gr-demand-backup-haproxy/3-write-data | +++ kubectl -n kuttl-test-native-chow get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 09:32:28 | gr-demand-backup-haproxy/3-write-data | ++ get_haproxy_svc gr-demand-backup-haproxy
logger.go:42: 09:32:28 | gr-demand-backup-haproxy/3-write-data | ++ local cluster=gr-demand-backup-haproxy
logger.go:42: 09:32:28 | gr-demand-backup-haproxy/3-write-data | ++ echo gr-demand-backup-haproxy-haproxy
logger.go:42: 09:32:28 | gr-demand-backup-haproxy/3-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100500)' '-h gr-demand-backup-haproxy-haproxy'
logger.go:42: 09:32:28 | gr-demand-backup-haproxy/3-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100500)'
logger.go:42: 09:32:28 | gr-demand-backup-haproxy/3-write-data | + local 'host=-h gr-demand-backup-haproxy-haproxy'
logger.go:42: 09:32:28 | gr-demand-backup-haproxy/3-write-data | ++ get_user_pass root
logger.go:42: 09:32:28 | gr-demand-backup-haproxy/3-write-data | ++ local user=root
logger.go:42: 09:32:28 | gr-demand-backup-haproxy/3-write-data | +++ get_cluster_name
logger.go:42: 09:32:28 | gr-demand-backup-haproxy/3-write-data | +++ kubectl -n kuttl-test-native-chow get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 09:32:28 | gr-demand-backup-haproxy/3-write-data | ++ local secret=gr-demand-backup-haproxy-secrets
logger.go:42: 09:32:28 | gr-demand-backup-haproxy/3-write-data | ++ kubectl -n kuttl-test-native-chow get secret gr-demand-backup-haproxy-secrets -o 'jsonpath={.data.root}'
logger.go:42: 09:32:28 | gr-demand-backup-haproxy/3-write-data | ++ base64 --decode
logger.go:42: 09:32:29 | gr-demand-backup-haproxy/3-write-data | + local 'user=-uroot -p'\'',(GmHs-yc,2-OKHhV,J'\'''
logger.go:42: 09:32:29 | gr-demand-backup-haproxy/3-write-data | + local pod=
logger.go:42: 09:32:29 | gr-demand-backup-haproxy/3-write-data | ++ get_client_pod
logger.go:42: 09:32:29 | gr-demand-backup-haproxy/3-write-data | ++ kubectl -n kuttl-test-native-chow get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 09:32:29 | gr-demand-backup-haproxy/3-write-data | + client_pod=mysql-client
logger.go:42: 09:32:29 | gr-demand-backup-haproxy/3-write-data | + wait_pod mysql-client
logger.go:42: 09:32:29 | gr-demand-backup-haproxy/3-write-data | + local pod=mysql-client
logger.go:42: 09:32:29 | gr-demand-backup-haproxy/3-write-data | + local ns=kuttl-test-native-chow
logger.go:42: 09:32:29 | gr-demand-backup-haproxy/3-write-data | + set +o xtrace
logger.go:42: 09:32:30 | gr-demand-backup-haproxy/3-write-data | mysql-clienttrue
logger.go:42: 09:32:30 | gr-demand-backup-haproxy/3-write-data | + kubectl -n kuttl-test-native-chow exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100500)" | mysql -sN -h gr-demand-backup-haproxy-haproxy -uroot -p'\'',(GmHs-yc,2-OKHhV,J'\'''
logger.go:42: 09:32:30 | gr-demand-backup-haproxy/3-write-data | + /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 09:32:30 | gr-demand-backup-haproxy/3-write-data | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 09:32:31 | gr-demand-backup-haproxy/3-write-data | + : logger.go:42: 09:32:31 | gr-demand-backup-haproxy/3-write-data | test step completed 3-write-data logger.go:42: 09:32:31 | gr-demand-backup-haproxy/4-create-backup-minio | starting test step 4-create-backup-minio logger.go:42: 09:32:31 | gr-demand-backup-haproxy/4-create-backup-minio | PerconaServerMySQLBackup:kuttl-test-native-chow/gr-demand-backup-haproxy-minio created logger.go:42: 09:32:43 | gr-demand-backup-haproxy/4-create-backup-minio | test step completed 4-create-backup-minio logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | starting test step 5-delete-data logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_mysql \ "TRUNCATE TABLE myDB.myTable" \ "-h $(get_haproxy_svc $(get_cluster_name))" cluster_name=$(get_cluster_name) for i in 0 1 2; do data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql") kubectl create configmap -n "${NAMESPACE}" 04-delete-data-minio-${i} --from-literal=data="${data}" done] logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | + source ../../functions logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ realpath ../../.. logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | ++++ pwd logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | ++ test_name=gr-demand-backup-haproxy logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/vars.sh logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/deploy logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/deploy logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/conf logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/conf logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy/conf logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy/conf logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export 
TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup-haproxy logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup-haproxy logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export GIT_BRANCH=PR-1292 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ GIT_BRANCH=PR-1292 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export VERSION=PR-1292-7efcb1c5 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ VERSION=PR-1292-7efcb1c5 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ [[ -z 8.4 ]] logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export MYSQL_VERSION=8.4 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ MYSQL_VERSION=8.4 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 09:32:43 | 
gr-demand-backup-haproxy/5-delete-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export CERT_MANAGER_VER=1.19.1 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ CERT_MANAGER_VER=1.19.1 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export MINIO_VER=5.4.0 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ MINIO_VER=5.4.0 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export VAULT_VER=0.16.1 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ VAULT_VER=0.16.1 logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | ++++ which gdate logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | ++++ which date logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ export date=/usr/sbin/date logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ date=/usr/sbin/date logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ oc get projects logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ : logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ kubectl get nodes logger.go:42: 09:32:43 | gr-demand-backup-haproxy/5-delete-data | +++ grep '^minikube' logger.go:42: 09:32:44 | gr-demand-backup-haproxy/5-delete-data | +++ which gsed logger.go:42: 09:32:44 | gr-demand-backup-haproxy/5-delete-data | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 09:32:44 | gr-demand-backup-haproxy/5-delete-data | +++ which sed logger.go:42: 09:32:44 | gr-demand-backup-haproxy/5-delete-data | ++ sed=/usr/sbin/sed logger.go:42: 09:32:44 | gr-demand-backup-haproxy/5-delete-data | +++ get_cluster_name logger.go:42: 09:32:44 | gr-demand-backup-haproxy/5-delete-data | +++ kubectl -n kuttl-test-native-chow get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:32:44 | gr-demand-backup-haproxy/5-delete-data | ++ get_haproxy_svc gr-demand-backup-haproxy logger.go:42: 09:32:44 | gr-demand-backup-haproxy/5-delete-data | ++ local cluster=gr-demand-backup-haproxy logger.go:42: 09:32:44 | gr-demand-backup-haproxy/5-delete-data | ++ echo gr-demand-backup-haproxy-haproxy logger.go:42: 09:32:44 | gr-demand-backup-haproxy/5-delete-data | + run_mysql 'TRUNCATE TABLE myDB.myTable' '-h gr-demand-backup-haproxy-haproxy' logger.go:42: 09:32:44 | gr-demand-backup-haproxy/5-delete-data | + local 'command=TRUNCATE TABLE myDB.myTable' logger.go:42: 09:32:44 | gr-demand-backup-haproxy/5-delete-data | + local 'host=-h gr-demand-backup-haproxy-haproxy' logger.go:42: 09:32:44 | gr-demand-backup-haproxy/5-delete-data | ++ get_user_pass root logger.go:42: 09:32:44 | gr-demand-backup-haproxy/5-delete-data | ++ local user=root logger.go:42: 09:32:44 | gr-demand-backup-haproxy/5-delete-data | +++ get_cluster_name logger.go:42: 09:32:44 | gr-demand-backup-haproxy/5-delete-data | +++ kubectl -n 
kuttl-test-native-chow get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:32:45 | gr-demand-backup-haproxy/5-delete-data | ++ local secret=gr-demand-backup-haproxy-secrets logger.go:42: 09:32:45 | gr-demand-backup-haproxy/5-delete-data | ++ kubectl -n kuttl-test-native-chow get secret gr-demand-backup-haproxy-secrets -o 'jsonpath={.data.root}' logger.go:42: 09:32:45 | gr-demand-backup-haproxy/5-delete-data | ++ base64 --decode logger.go:42: 09:32:45 | gr-demand-backup-haproxy/5-delete-data | + local 'user=-uroot -p'\'',(GmHs-yc,2-OKHhV,J'\''' logger.go:42: 09:32:45 | gr-demand-backup-haproxy/5-delete-data | + local pod= logger.go:42: 09:32:45 | gr-demand-backup-haproxy/5-delete-data | ++ get_client_pod logger.go:42: 09:32:45 | gr-demand-backup-haproxy/5-delete-data | ++ kubectl -n kuttl-test-native-chow get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:32:46 | gr-demand-backup-haproxy/5-delete-data | + client_pod=mysql-client logger.go:42: 09:32:46 | gr-demand-backup-haproxy/5-delete-data | + wait_pod mysql-client logger.go:42: 09:32:46 | gr-demand-backup-haproxy/5-delete-data | + local pod=mysql-client logger.go:42: 09:32:46 | gr-demand-backup-haproxy/5-delete-data | + local ns=kuttl-test-native-chow logger.go:42: 09:32:46 | gr-demand-backup-haproxy/5-delete-data | + set +o xtrace logger.go:42: 09:32:46 | gr-demand-backup-haproxy/5-delete-data | mysql-clienttrue logger.go:42: 09:32:46 | gr-demand-backup-haproxy/5-delete-data | + kubectl -n kuttl-test-native-chow exec mysql-client -- bash -c 'printf '\''%s\n'\'' "TRUNCATE TABLE myDB.myTable" | mysql -sN -h gr-demand-backup-haproxy-haproxy -uroot -p'\'',(GmHs-yc,2-OKHhV,J'\''' logger.go:42: 09:32:46 | gr-demand-backup-haproxy/5-delete-data | + /usr/sbin/sed -e 's/mysql: //' logger.go:42: 09:32:46 | gr-demand-backup-haproxy/5-delete-data | + grep -v 'Using a password on the command line interface can be insecure.' 
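Step 4 above only logs "PerconaServerMySQLBackup:kuttl-test-native-chow/gr-demand-backup-haproxy-minio created" because the object comes from a kuttl step file rather than an inline script. An on-demand backup object of that shape looks roughly like this; a hedged sketch, with apiVersion and field names following the operator's published deploy/backup.yaml as I understand it, so verify against the CRD in this branch:

    # Sketch only: confirm apiVersion/fields against deploy/backup.yaml.
    kubectl -n kuttl-test-native-chow apply -f - <<'EOF'
    apiVersion: ps.percona.com/v1alpha1
    kind: PerconaServerMySQLBackup
    metadata:
      name: gr-demand-backup-haproxy-minio
    spec:
      clusterName: gr-demand-backup-haproxy
      # storageName must match a storage entry in the cluster spec;
      # "minio" is an assumption based on this test's name.
      storageName: minio
    EOF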
logger.go:42: 09:32:47 | gr-demand-backup-haproxy/5-delete-data | + : logger.go:42: 09:32:47 | gr-demand-backup-haproxy/5-delete-data | ++ get_cluster_name logger.go:42: 09:32:47 | gr-demand-backup-haproxy/5-delete-data | ++ kubectl -n kuttl-test-native-chow get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:32:48 | gr-demand-backup-haproxy/5-delete-data | + cluster_name=gr-demand-backup-haproxy logger.go:42: 09:32:48 | gr-demand-backup-haproxy/5-delete-data | + for i in 0 1 2 logger.go:42: 09:32:48 | gr-demand-backup-haproxy/5-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-haproxy-mysql-0.gr-demand-backup-haproxy-mysql' logger.go:42: 09:32:48 | gr-demand-backup-haproxy/5-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 09:32:48 | gr-demand-backup-haproxy/5-delete-data | ++ local 'host=-h gr-demand-backup-haproxy-mysql-0.gr-demand-backup-haproxy-mysql' logger.go:42: 09:32:48 | gr-demand-backup-haproxy/5-delete-data | +++ get_user_pass root logger.go:42: 09:32:48 | gr-demand-backup-haproxy/5-delete-data | +++ local user=root logger.go:42: 09:32:48 | gr-demand-backup-haproxy/5-delete-data | ++++ get_cluster_name logger.go:42: 09:32:48 | gr-demand-backup-haproxy/5-delete-data | ++++ kubectl -n kuttl-test-native-chow get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:32:48 | gr-demand-backup-haproxy/5-delete-data | +++ local secret=gr-demand-backup-haproxy-secrets logger.go:42: 09:32:48 | gr-demand-backup-haproxy/5-delete-data | +++ kubectl -n kuttl-test-native-chow get secret gr-demand-backup-haproxy-secrets -o 'jsonpath={.data.root}' logger.go:42: 09:32:48 | gr-demand-backup-haproxy/5-delete-data | +++ base64 --decode logger.go:42: 09:32:49 | gr-demand-backup-haproxy/5-delete-data | ++ local 'user=-uroot -p'\'',(GmHs-yc,2-OKHhV,J'\''' logger.go:42: 09:32:49 | gr-demand-backup-haproxy/5-delete-data | ++ local pod= logger.go:42: 09:32:49 | gr-demand-backup-haproxy/5-delete-data | +++ get_client_pod logger.go:42: 09:32:49 | gr-demand-backup-haproxy/5-delete-data | +++ kubectl -n kuttl-test-native-chow get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:32:49 | gr-demand-backup-haproxy/5-delete-data | ++ client_pod=mysql-client logger.go:42: 09:32:49 | gr-demand-backup-haproxy/5-delete-data | ++ wait_pod mysql-client logger.go:42: 09:32:49 | gr-demand-backup-haproxy/5-delete-data | ++ local pod=mysql-client logger.go:42: 09:32:49 | gr-demand-backup-haproxy/5-delete-data | ++ local ns=kuttl-test-native-chow logger.go:42: 09:32:49 | gr-demand-backup-haproxy/5-delete-data | ++ set +o xtrace logger.go:42: 09:32:50 | gr-demand-backup-haproxy/5-delete-data | mysql-clienttrue logger.go:42: 09:32:50 | gr-demand-backup-haproxy/5-delete-data | ++ kubectl -n kuttl-test-native-chow exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-demand-backup-haproxy-mysql-0.gr-demand-backup-haproxy-mysql -uroot -p'\'',(GmHs-yc,2-OKHhV,J'\''' logger.go:42: 09:32:50 | gr-demand-backup-haproxy/5-delete-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 09:32:50 | gr-demand-backup-haproxy/5-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 09:32:51 | gr-demand-backup-haproxy/5-delete-data | ++ : logger.go:42: 09:32:51 | gr-demand-backup-haproxy/5-delete-data | + data= logger.go:42: 09:32:51 | gr-demand-backup-haproxy/5-delete-data | + kubectl create configmap -n kuttl-test-native-chow 04-delete-data-minio-0 --from-literal=data= logger.go:42: 09:32:51 | gr-demand-backup-haproxy/5-delete-data | configmap/04-delete-data-minio-0 created logger.go:42: 09:32:51 | gr-demand-backup-haproxy/5-delete-data | + for i in 0 1 2 logger.go:42: 09:32:51 | gr-demand-backup-haproxy/5-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql' logger.go:42: 09:32:51 | gr-demand-backup-haproxy/5-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 09:32:51 | gr-demand-backup-haproxy/5-delete-data | ++ local 'host=-h gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql' logger.go:42: 09:32:51 | gr-demand-backup-haproxy/5-delete-data | +++ get_user_pass root logger.go:42: 09:32:51 | gr-demand-backup-haproxy/5-delete-data | +++ local user=root logger.go:42: 09:32:51 | gr-demand-backup-haproxy/5-delete-data | ++++ get_cluster_name logger.go:42: 09:32:51 | gr-demand-backup-haproxy/5-delete-data | ++++ kubectl -n kuttl-test-native-chow get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:32:52 | gr-demand-backup-haproxy/5-delete-data | +++ local secret=gr-demand-backup-haproxy-secrets logger.go:42: 09:32:52 | gr-demand-backup-haproxy/5-delete-data | +++ kubectl -n kuttl-test-native-chow get secret gr-demand-backup-haproxy-secrets -o 'jsonpath={.data.root}' logger.go:42: 09:32:52 | gr-demand-backup-haproxy/5-delete-data | +++ base64 --decode logger.go:42: 09:32:52 | gr-demand-backup-haproxy/5-delete-data | ++ local 'user=-uroot -p'\'',(GmHs-yc,2-OKHhV,J'\''' logger.go:42: 09:32:52 | gr-demand-backup-haproxy/5-delete-data | ++ local pod= logger.go:42: 09:32:52 | gr-demand-backup-haproxy/5-delete-data | +++ get_client_pod logger.go:42: 09:32:52 | gr-demand-backup-haproxy/5-delete-data | +++ kubectl -n kuttl-test-native-chow get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:32:53 | gr-demand-backup-haproxy/5-delete-data | ++ client_pod=mysql-client logger.go:42: 09:32:53 | gr-demand-backup-haproxy/5-delete-data | ++ wait_pod mysql-client logger.go:42: 09:32:53 | gr-demand-backup-haproxy/5-delete-data | ++ local pod=mysql-client logger.go:42: 09:32:53 | gr-demand-backup-haproxy/5-delete-data | ++ local ns=kuttl-test-native-chow logger.go:42: 09:32:53 | gr-demand-backup-haproxy/5-delete-data | ++ set +o xtrace logger.go:42: 09:32:53 | gr-demand-backup-haproxy/5-delete-data | mysql-clienttrue logger.go:42: 09:32:53 | gr-demand-backup-haproxy/5-delete-data | ++ kubectl -n kuttl-test-native-chow exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql -uroot -p'\'',(GmHs-yc,2-OKHhV,J'\''' logger.go:42: 09:32:53 | gr-demand-backup-haproxy/5-delete-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 09:32:53 | gr-demand-backup-haproxy/5-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 09:32:54 | gr-demand-backup-haproxy/5-delete-data | ++ : logger.go:42: 09:32:54 | gr-demand-backup-haproxy/5-delete-data | + data= logger.go:42: 09:32:54 | gr-demand-backup-haproxy/5-delete-data | + kubectl create configmap -n kuttl-test-native-chow 04-delete-data-minio-1 --from-literal=data= logger.go:42: 09:32:55 | gr-demand-backup-haproxy/5-delete-data | configmap/04-delete-data-minio-1 created logger.go:42: 09:32:55 | gr-demand-backup-haproxy/5-delete-data | + for i in 0 1 2 logger.go:42: 09:32:55 | gr-demand-backup-haproxy/5-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql' logger.go:42: 09:32:55 | gr-demand-backup-haproxy/5-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 09:32:55 | gr-demand-backup-haproxy/5-delete-data | ++ local 'host=-h gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql' logger.go:42: 09:32:55 | gr-demand-backup-haproxy/5-delete-data | +++ get_user_pass root logger.go:42: 09:32:55 | gr-demand-backup-haproxy/5-delete-data | +++ local user=root logger.go:42: 09:32:55 | gr-demand-backup-haproxy/5-delete-data | ++++ get_cluster_name logger.go:42: 09:32:55 | gr-demand-backup-haproxy/5-delete-data | ++++ kubectl -n kuttl-test-native-chow get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:32:55 | gr-demand-backup-haproxy/5-delete-data | +++ local secret=gr-demand-backup-haproxy-secrets logger.go:42: 09:32:55 | gr-demand-backup-haproxy/5-delete-data | +++ kubectl -n kuttl-test-native-chow get secret gr-demand-backup-haproxy-secrets -o 'jsonpath={.data.root}' logger.go:42: 09:32:55 | gr-demand-backup-haproxy/5-delete-data | +++ base64 --decode logger.go:42: 09:32:56 | gr-demand-backup-haproxy/5-delete-data | ++ local 'user=-uroot -p'\'',(GmHs-yc,2-OKHhV,J'\''' logger.go:42: 09:32:56 | gr-demand-backup-haproxy/5-delete-data | ++ local pod= logger.go:42: 09:32:56 | gr-demand-backup-haproxy/5-delete-data | +++ get_client_pod logger.go:42: 09:32:56 | gr-demand-backup-haproxy/5-delete-data | +++ kubectl -n kuttl-test-native-chow get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:32:56 | gr-demand-backup-haproxy/5-delete-data | ++ client_pod=mysql-client logger.go:42: 09:32:56 | gr-demand-backup-haproxy/5-delete-data | ++ wait_pod mysql-client logger.go:42: 09:32:56 | gr-demand-backup-haproxy/5-delete-data | ++ local pod=mysql-client logger.go:42: 09:32:56 | gr-demand-backup-haproxy/5-delete-data | ++ local ns=kuttl-test-native-chow logger.go:42: 09:32:56 | gr-demand-backup-haproxy/5-delete-data | ++ set +o xtrace logger.go:42: 09:32:56 | gr-demand-backup-haproxy/5-delete-data | mysql-clienttrue logger.go:42: 09:32:56 | gr-demand-backup-haproxy/5-delete-data | ++ kubectl -n kuttl-test-native-chow exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql -uroot -p'\'',(GmHs-yc,2-OKHhV,J'\''' logger.go:42: 09:32:56 | gr-demand-backup-haproxy/5-delete-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 09:32:56 | gr-demand-backup-haproxy/5-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
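The three SELECT traces above are iterations of the flattened step script logged at the start of 5-delete-data. Reflowed for readability (same commands; run_mysql, get_cluster_name, and get_haproxy_svc are the suite's helpers from e2e-tests/functions, and NAMESPACE is injected by kuttl):

    # Reflowed from the 5-delete-data step script shown earlier in this log.
    run_mysql "TRUNCATE TABLE myDB.myTable" \
        "-h $(get_haproxy_svc $(get_cluster_name))"

    cluster_name=$(get_cluster_name)
    for i in 0 1 2; do
        # Read each Group Replication member directly through the headless
        # service (<cluster>-mysql-<i>.<cluster>-mysql) to confirm the
        # TRUNCATE replicated everywhere, then record the (empty) result.
        data=$(run_mysql "SELECT * FROM myDB.myTable" \
            "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql")
        kubectl create configmap -n "${NAMESPACE}" 04-delete-data-minio-${i} \
            --from-literal=data="${data}"
    done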
logger.go:42: 09:32:57 | gr-demand-backup-haproxy/5-delete-data | ++ : logger.go:42: 09:32:57 | gr-demand-backup-haproxy/5-delete-data | + data= logger.go:42: 09:32:57 | gr-demand-backup-haproxy/5-delete-data | + kubectl create configmap -n kuttl-test-native-chow 04-delete-data-minio-2 --from-literal=data= logger.go:42: 09:32:58 | gr-demand-backup-haproxy/5-delete-data | configmap/04-delete-data-minio-2 created logger.go:42: 09:32:59 | gr-demand-backup-haproxy/5-delete-data | test step completed 5-delete-data logger.go:42: 09:32:59 | gr-demand-backup-haproxy/6-restore-from-minio | starting test step 6-restore-from-minio logger.go:42: 09:32:59 | gr-demand-backup-haproxy/6-restore-from-minio | PerconaServerMySQLRestore:kuttl-test-native-chow/gr-demand-backup-haproxy-restore-minio created logger.go:42: 09:37:52 | gr-demand-backup-haproxy/6-restore-from-minio | test step completed 6-restore-from-minio logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | starting test step 7-read-data logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions cluster_name=$(get_cluster_name) for i in 0 1 2; do data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql") kubectl create configmap -n "${NAMESPACE}" 07-read-data-minio-${i} --from-literal=data="${data}" done] logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | + source ../../functions logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ realpath ../../.. logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | ++++ pwd logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | ++ test_name=gr-demand-backup-haproxy logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/vars.sh logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/deploy logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/deploy logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/conf logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/conf logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy/conf logger.go:42: 09:37:52 | 
gr-demand-backup-haproxy/7-read-data | +++ TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy/conf logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup-haproxy logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup-haproxy logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export GIT_BRANCH=PR-1292 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ GIT_BRANCH=PR-1292 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export VERSION=PR-1292-7efcb1c5 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ VERSION=PR-1292-7efcb1c5 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ [[ -z 8.4 ]] logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export MYSQL_VERSION=8.4 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ MYSQL_VERSION=8.4 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 
09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export CERT_MANAGER_VER=1.19.1 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ CERT_MANAGER_VER=1.19.1 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export MINIO_VER=5.4.0 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ MINIO_VER=5.4.0 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export VAULT_VER=0.16.1 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ VAULT_VER=0.16.1 logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | ++++ which gdate logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | ++++ which date logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ export date=/usr/sbin/date logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ date=/usr/sbin/date logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ oc get projects logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ : logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ kubectl get nodes logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ grep '^minikube' logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ which gsed logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | +++ which sed logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | ++ sed=/usr/sbin/sed logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | ++ get_cluster_name logger.go:42: 09:37:52 | gr-demand-backup-haproxy/7-read-data | ++ kubectl -n kuttl-test-native-chow get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:37:53 | gr-demand-backup-haproxy/7-read-data | + cluster_name=gr-demand-backup-haproxy logger.go:42: 09:37:53 | gr-demand-backup-haproxy/7-read-data | + for i in 0 1 2 logger.go:42: 09:37:53 | gr-demand-backup-haproxy/7-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-haproxy-mysql-0.gr-demand-backup-haproxy-mysql' logger.go:42: 09:37:53 | gr-demand-backup-haproxy/7-read-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 09:37:53 | gr-demand-backup-haproxy/7-read-data | ++ local 'host=-h gr-demand-backup-haproxy-mysql-0.gr-demand-backup-haproxy-mysql' logger.go:42: 09:37:53 | gr-demand-backup-haproxy/7-read-data | +++ get_user_pass root logger.go:42: 09:37:53 | gr-demand-backup-haproxy/7-read-data | +++ local user=root logger.go:42: 09:37:53 | gr-demand-backup-haproxy/7-read-data | ++++ get_cluster_name logger.go:42: 09:37:53 | gr-demand-backup-haproxy/7-read-data | ++++ kubectl -n 
kuttl-test-native-chow get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:37:54 | gr-demand-backup-haproxy/7-read-data | +++ local secret=gr-demand-backup-haproxy-secrets logger.go:42: 09:37:54 | gr-demand-backup-haproxy/7-read-data | +++ base64 --decode logger.go:42: 09:37:54 | gr-demand-backup-haproxy/7-read-data | +++ kubectl -n kuttl-test-native-chow get secret gr-demand-backup-haproxy-secrets -o 'jsonpath={.data.root}' logger.go:42: 09:37:54 | gr-demand-backup-haproxy/7-read-data | ++ local 'user=-uroot -p'\'',(GmHs-yc,2-OKHhV,J'\''' logger.go:42: 09:37:54 | gr-demand-backup-haproxy/7-read-data | ++ local pod= logger.go:42: 09:37:54 | gr-demand-backup-haproxy/7-read-data | +++ get_client_pod logger.go:42: 09:37:54 | gr-demand-backup-haproxy/7-read-data | +++ kubectl -n kuttl-test-native-chow get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:37:54 | gr-demand-backup-haproxy/7-read-data | ++ client_pod=mysql-client logger.go:42: 09:37:54 | gr-demand-backup-haproxy/7-read-data | ++ wait_pod mysql-client logger.go:42: 09:37:54 | gr-demand-backup-haproxy/7-read-data | ++ local pod=mysql-client logger.go:42: 09:37:54 | gr-demand-backup-haproxy/7-read-data | ++ local ns=kuttl-test-native-chow logger.go:42: 09:37:54 | gr-demand-backup-haproxy/7-read-data | ++ set +o xtrace logger.go:42: 09:37:55 | gr-demand-backup-haproxy/7-read-data | mysql-clienttrue logger.go:42: 09:37:55 | gr-demand-backup-haproxy/7-read-data | ++ kubectl -n kuttl-test-native-chow exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-demand-backup-haproxy-mysql-0.gr-demand-backup-haproxy-mysql -uroot -p'\'',(GmHs-yc,2-OKHhV,J'\''' logger.go:42: 09:37:55 | gr-demand-backup-haproxy/7-read-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 09:37:55 | gr-demand-backup-haproxy/7-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
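Step 6 above drove the roughly five-minute restore purely through a PerconaServerMySQLRestore object created from a kuttl step file. A hedged sketch of that object, with apiVersion and fields per the operator's published deploy/restore.yaml as I understand it (the metadata name matches this log; everything else should be verified against this branch):

    # Sketch only: confirm fields against deploy/restore.yaml in the repo.
    kubectl -n kuttl-test-native-chow apply -f - <<'EOF'
    apiVersion: ps.percona.com/v1alpha1
    kind: PerconaServerMySQLRestore
    metadata:
      name: gr-demand-backup-haproxy-restore-minio
    spec:
      clusterName: gr-demand-backup-haproxy
      # backupName points at the PerconaServerMySQLBackup from step 4.
      backupName: gr-demand-backup-haproxy-minio
    EOF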
logger.go:42: 09:37:56 | gr-demand-backup-haproxy/7-read-data | + data=100500 logger.go:42: 09:37:56 | gr-demand-backup-haproxy/7-read-data | + kubectl create configmap -n kuttl-test-native-chow 07-read-data-minio-0 --from-literal=data=100500 logger.go:42: 09:37:56 | gr-demand-backup-haproxy/7-read-data | configmap/07-read-data-minio-0 created logger.go:42: 09:37:56 | gr-demand-backup-haproxy/7-read-data | + for i in 0 1 2 logger.go:42: 09:37:56 | gr-demand-backup-haproxy/7-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql' logger.go:42: 09:37:56 | gr-demand-backup-haproxy/7-read-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 09:37:56 | gr-demand-backup-haproxy/7-read-data | ++ local 'host=-h gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql' logger.go:42: 09:37:56 | gr-demand-backup-haproxy/7-read-data | +++ get_user_pass root logger.go:42: 09:37:56 | gr-demand-backup-haproxy/7-read-data | +++ local user=root logger.go:42: 09:37:56 | gr-demand-backup-haproxy/7-read-data | ++++ get_cluster_name logger.go:42: 09:37:56 | gr-demand-backup-haproxy/7-read-data | ++++ kubectl -n kuttl-test-native-chow get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:37:57 | gr-demand-backup-haproxy/7-read-data | +++ local secret=gr-demand-backup-haproxy-secrets logger.go:42: 09:37:57 | gr-demand-backup-haproxy/7-read-data | +++ kubectl -n kuttl-test-native-chow get secret gr-demand-backup-haproxy-secrets -o 'jsonpath={.data.root}' logger.go:42: 09:37:57 | gr-demand-backup-haproxy/7-read-data | +++ base64 --decode logger.go:42: 09:37:57 | gr-demand-backup-haproxy/7-read-data | ++ local 'user=-uroot -p'\'',(GmHs-yc,2-OKHhV,J'\''' logger.go:42: 09:37:57 | gr-demand-backup-haproxy/7-read-data | ++ local pod= logger.go:42: 09:37:57 | gr-demand-backup-haproxy/7-read-data | +++ get_client_pod logger.go:42: 09:37:57 | gr-demand-backup-haproxy/7-read-data | +++ kubectl -n kuttl-test-native-chow get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:37:58 | gr-demand-backup-haproxy/7-read-data | ++ client_pod=mysql-client logger.go:42: 09:37:58 | gr-demand-backup-haproxy/7-read-data | ++ wait_pod mysql-client logger.go:42: 09:37:58 | gr-demand-backup-haproxy/7-read-data | ++ local pod=mysql-client logger.go:42: 09:37:58 | gr-demand-backup-haproxy/7-read-data | ++ local ns=kuttl-test-native-chow logger.go:42: 09:37:58 | gr-demand-backup-haproxy/7-read-data | ++ set +o xtrace logger.go:42: 09:37:58 | gr-demand-backup-haproxy/7-read-data | mysql-clienttrue logger.go:42: 09:37:58 | gr-demand-backup-haproxy/7-read-data | ++ kubectl -n kuttl-test-native-chow exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql -uroot -p'\'',(GmHs-yc,2-OKHhV,J'\''' logger.go:42: 09:37:58 | gr-demand-backup-haproxy/7-read-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 09:37:58 | gr-demand-backup-haproxy/7-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 09:37:59 | gr-demand-backup-haproxy/7-read-data | + data=100500 logger.go:42: 09:37:59 | gr-demand-backup-haproxy/7-read-data | + kubectl create configmap -n kuttl-test-native-chow 07-read-data-minio-1 --from-literal=data=100500 logger.go:42: 09:38:00 | gr-demand-backup-haproxy/7-read-data | configmap/07-read-data-minio-1 created logger.go:42: 09:38:00 | gr-demand-backup-haproxy/7-read-data | + for i in 0 1 2 logger.go:42: 09:38:00 | gr-demand-backup-haproxy/7-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql' logger.go:42: 09:38:00 | gr-demand-backup-haproxy/7-read-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 09:38:00 | gr-demand-backup-haproxy/7-read-data | ++ local 'host=-h gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql' logger.go:42: 09:38:00 | gr-demand-backup-haproxy/7-read-data | +++ get_user_pass root logger.go:42: 09:38:00 | gr-demand-backup-haproxy/7-read-data | +++ local user=root logger.go:42: 09:38:00 | gr-demand-backup-haproxy/7-read-data | ++++ get_cluster_name logger.go:42: 09:38:00 | gr-demand-backup-haproxy/7-read-data | ++++ kubectl -n kuttl-test-native-chow get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 09:38:00 | gr-demand-backup-haproxy/7-read-data | +++ local secret=gr-demand-backup-haproxy-secrets logger.go:42: 09:38:00 | gr-demand-backup-haproxy/7-read-data | +++ kubectl -n kuttl-test-native-chow get secret gr-demand-backup-haproxy-secrets -o 'jsonpath={.data.root}' logger.go:42: 09:38:00 | gr-demand-backup-haproxy/7-read-data | +++ base64 --decode logger.go:42: 09:38:01 | gr-demand-backup-haproxy/7-read-data | ++ local 'user=-uroot -p'\'',(GmHs-yc,2-OKHhV,J'\''' logger.go:42: 09:38:01 | gr-demand-backup-haproxy/7-read-data | ++ local pod= logger.go:42: 09:38:01 | gr-demand-backup-haproxy/7-read-data | +++ get_client_pod logger.go:42: 09:38:01 | gr-demand-backup-haproxy/7-read-data | +++ kubectl -n kuttl-test-native-chow get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 09:38:01 | gr-demand-backup-haproxy/7-read-data | ++ client_pod=mysql-client logger.go:42: 09:38:01 | gr-demand-backup-haproxy/7-read-data | ++ wait_pod mysql-client logger.go:42: 09:38:01 | gr-demand-backup-haproxy/7-read-data | ++ local pod=mysql-client logger.go:42: 09:38:01 | gr-demand-backup-haproxy/7-read-data | ++ local ns=kuttl-test-native-chow logger.go:42: 09:38:01 | gr-demand-backup-haproxy/7-read-data | ++ set +o xtrace logger.go:42: 09:38:02 | gr-demand-backup-haproxy/7-read-data | mysql-clienttrue logger.go:42: 09:38:02 | gr-demand-backup-haproxy/7-read-data | ++ kubectl -n kuttl-test-native-chow exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql -uroot -p'\'',(GmHs-yc,2-OKHhV,J'\''' logger.go:42: 09:38:02 | gr-demand-backup-haproxy/7-read-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 09:38:02 | gr-demand-backup-haproxy/7-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
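The 7-read-data loop records each member's post-restore row into a configmap, which the step's assert files presumably compare against the expected value. The recorded data can also be spot-checked by hand (namespace and configmap names from this log; 100500 is the row inserted in step 3):

    # Each member should report 100500 after the restore.
    for i in 0 1 2; do
        kubectl -n kuttl-test-native-chow get configmap "07-read-data-minio-${i}" \
            -o 'jsonpath={.data.data}'; echo
    done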
logger.go:42: 09:38:02 | gr-demand-backup-haproxy/7-read-data | + data=100500 logger.go:42: 09:38:02 | gr-demand-backup-haproxy/7-read-data | + kubectl create configmap -n kuttl-test-native-chow 07-read-data-minio-2 --from-literal=data=100500 logger.go:42: 09:38:03 | gr-demand-backup-haproxy/7-read-data | configmap/07-read-data-minio-2 created logger.go:42: 09:38:04 | gr-demand-backup-haproxy/7-read-data | test step completed 7-read-data logger.go:42: 09:38:04 | gr-demand-backup-haproxy/98-drop-finalizer | starting test step 98-drop-finalizer logger.go:42: 09:38:05 | gr-demand-backup-haproxy/98-drop-finalizer | PerconaServerMySQL:kuttl-test-native-chow/gr-demand-backup-haproxy updated logger.go:42: 09:38:05 | gr-demand-backup-haproxy/98-drop-finalizer | test step completed 98-drop-finalizer logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | starting test step 99-remove-cluster-gracefully logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_operator] logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | + source ../../functions logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ realpath ../../.. logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | ++++ pwd logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | ++ test_name=gr-demand-backup-haproxy logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/vars.sh logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/deploy logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/deploy logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/conf logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/conf logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy/conf logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | 
+++ TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/e2e-tests/tests/gr-demand-backup-haproxy/conf logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup-haproxy logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup-haproxy logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export GIT_BRANCH=PR-1292 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ GIT_BRANCH=PR-1292 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export VERSION=PR-1292-7efcb1c5 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ VERSION=PR-1292-7efcb1c5 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ [[ -z 8.4 ]] logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export MYSQL_VERSION=8.4 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ MYSQL_VERSION=8.4 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ 
export PMM_SERVER_VERSION=1.4.3 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export CERT_MANAGER_VER=1.19.1 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ CERT_MANAGER_VER=1.19.1 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export MINIO_VER=5.4.0 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ MINIO_VER=5.4.0 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export VAULT_VER=0.16.1 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ VAULT_VER=0.16.1 logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | ++++ which gdate logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | ++++ which date logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ export date=/usr/sbin/date logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ date=/usr/sbin/date logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ oc get projects logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ : logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ kubectl get nodes logger.go:42: 09:38:05 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ grep '^minikube' logger.go:42: 09:38:06 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ which gsed logger.go:42: 09:38:06 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1292/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 09:38:06 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | +++ which sed logger.go:42: 09:38:06 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | ++ sed=/usr/sbin/sed logger.go:42: 09:38:06 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | + destroy_operator logger.go:42: 09:38:06 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0 logger.go:42: 09:38:06 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | Warning: 
Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 09:38:06 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | deployment.apps "percona-server-mysql-operator" force deleted from ps-operator namespace logger.go:42: 09:38:06 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | + [[ -n ps-operator ]] logger.go:42: 09:38:06 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | + kubectl delete namespace ps-operator --force --grace-period=0 logger.go:42: 09:38:07 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 09:38:07 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | namespace "ps-operator" force deleted logger.go:42: 09:38:17 | gr-demand-backup-haproxy/99-remove-cluster-gracefully | test step completed 99-remove-cluster-gracefully logger.go:42: 09:38:17 | gr-demand-backup-haproxy | gr-demand-backup-haproxy events from ns kuttl-test-native-chow: logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:12 +0000 UTC Normal Pod mysql-client Binding Scheduled Successfully assigned kuttl-test-native-chow/mysql-client to gke-jen-ps-1292-7efcb1c5-default-pool-a350dfaa-fnzk default-scheduler logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:12 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Container image "perconalab/percona-server-mysql-operator:main-psmysql8.4" already present on machine kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:12 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container: mysql-client kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:12 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:17 +0000 UTC Normal ReplicaSet.apps minio-service-7b949cd57 SuccessfulCreate Created pod: minio-service-7b949cd57-l2mz8 replicaset-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:17 +0000 UTC Normal PersistentVolumeClaim minio-service WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:17 +0000 UTC Normal Deployment.apps minio-service ScalingReplicaSet Scaled up replica set minio-service-7b949cd57 from 0 to 1 deployment-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:18 +0000 UTC Normal PersistentVolumeClaim minio-service ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:18 +0000 UTC Normal PersistentVolumeClaim minio-service Provisioning External provisioner is provisioning volume for claim "kuttl-test-native-chow/minio-service" pd.csi.storage.gke.io_gke-bc753a67225840b69b19-5d69-1879-vm_eb3ac154-71ad-48db-bec2-b935b4b218fa logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:19 +0000 UTC Normal Pod minio-service-post-job-hbkx9 Binding Scheduled Successfully assigned kuttl-test-native-chow/minio-service-post-job-hbkx9 to gke-jen-ps-1292-7efcb1c5-default-pool-a350dfaa-l0gt default-scheduler logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:19 +0000 UTC Normal Pod minio-service-post-job-hbkx9.spec.containers{minio-make-user} Pulling Pulling image "quay.io/minio/mc:RELEASE.2024-11-21T17-21-54Z" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:19 +0000 UTC Normal Job.batch minio-service-post-job SuccessfulCreate Created pod: minio-service-post-job-hbkx9 job-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:20 +0000 UTC Normal Pod minio-service-post-job-hbkx9.spec.containers{minio-make-user} Pulled Successfully pulled image "quay.io/minio/mc:RELEASE.2024-11-21T17-21-54Z" in 1.215s (1.215s including waiting). Image size: 28122288 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:20 +0000 UTC Normal Pod minio-service-post-job-hbkx9.spec.containers{minio-make-user} Created Created container: minio-make-user kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:20 +0000 UTC Normal Pod minio-service-post-job-hbkx9.spec.containers{minio-make-user} Started Started container minio-make-user kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:21 +0000 UTC Normal PersistentVolumeClaim minio-service ProvisioningSucceeded Successfully provisioned volume pvc-881098be-7b2b-4ae3-a718-91c170438068 pd.csi.storage.gke.io_gke-bc753a67225840b69b19-5d69-1879-vm_eb3ac154-71ad-48db-bec2-b935b4b218fa logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:22 +0000 UTC Normal Pod minio-service-7b949cd57-l2mz8 Binding Scheduled Successfully assigned kuttl-test-native-chow/minio-service-7b949cd57-l2mz8 to gke-jen-ps-1292-7efcb1c5-default-pool-a350dfaa-fnzk default-scheduler logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:26 +0000 UTC Normal Pod minio-service-7b949cd57-l2mz8 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-881098be-7b2b-4ae3-a718-91c170438068" attachdetach-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:30 +0000 UTC Normal Pod minio-service-7b949cd57-l2mz8.spec.containers{minio} Pulling Pulling image "quay.io/minio/minio:RELEASE.2024-12-18T13-15-44Z" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:32 +0000 UTC Normal Pod minio-service-7b949cd57-l2mz8.spec.containers{minio} Pulled Successfully pulled image "quay.io/minio/minio:RELEASE.2024-12-18T13-15-44Z" in 2.271s (2.271s including waiting). Image size: 62642371 bytes. 
kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:32 +0000 UTC Normal Pod minio-service-7b949cd57-l2mz8.spec.containers{minio} Created Created container: minio kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:32 +0000 UTC Normal Pod minio-service-7b949cd57-l2mz8.spec.containers{minio} Started Started container minio kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:41 +0000 UTC Normal Job.batch minio-service-post-job Completed Job completed job-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:43 +0000 UTC Normal Pod aws-cli Binding Scheduled Successfully assigned kuttl-test-native-chow/aws-cli to gke-jen-ps-1292-7efcb1c5-default-pool-a350dfaa-fnzk default-scheduler logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:44 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Pulling Pulling image "perconalab/awscli" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:46 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Pulled Successfully pulled image "perconalab/awscli" in 2.082s (2.082s including waiting). Image size: 30314917 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:46 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Created Created container: aws-cli kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:46 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Started Started container aws-cli kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:56 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-haproxy-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:56 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-haproxy-mysql-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
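(The PVC events above follow the usual CSI provisioning flow: WaitForFirstConsumer defers binding until a pod is scheduled, then the external provisioner pd.csi.storage.gke.io creates the volume and reports ProvisioningSucceeded. A hypothetical spot-check of that flow for this run's claim, assuming access to the test namespace:)

    # Inspect the claim's phase and the event trail shown in this log.
    kubectl -n kuttl-test-native-chow get pvc datadir-gr-demand-backup-haproxy-mysql-0
    kubectl -n kuttl-test-native-chow describe pvc datadir-gr-demand-backup-haproxy-mysql-0
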
persistentvolume-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:56 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-haproxy-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-native-chow/datadir-gr-demand-backup-haproxy-mysql-0" pd.csi.storage.gke.io_gke-bc753a67225840b69b19-5d69-1879-vm_eb3ac154-71ad-48db-bec2-b935b4b218fa logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:56 +0000 UTC Normal StatefulSet.apps gr-demand-backup-haproxy-mysql SuccessfulCreate create Claim datadir-gr-demand-backup-haproxy-mysql-0 Pod gr-demand-backup-haproxy-mysql-0 in StatefulSet gr-demand-backup-haproxy-mysql success statefulset-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:56 +0000 UTC Normal PodDisruptionBudget.policy gr-demand-backup-haproxy-mysql NoPods No matching pods found controllermanager logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:56 +0000 UTC Normal StatefulSet.apps gr-demand-backup-haproxy-mysql SuccessfulCreate create Pod gr-demand-backup-haproxy-mysql-0 in StatefulSet gr-demand-backup-haproxy-mysql successful statefulset-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:56 +0000 UTC Normal PerconaServerMySQL.ps.percona.com gr-demand-backup-haproxy ClusterStateChanged -> Initializing ps-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:29:59 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-haproxy-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-f33e6ffc-3c95-4310-98ac-5c74341b2d7a pd.csi.storage.gke.io_gke-bc753a67225840b69b19-5d69-1879-vm_eb3ac154-71ad-48db-bec2-b935b4b218fa logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:00 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0 Binding Scheduled Successfully assigned kuttl-test-native-chow/gr-demand-backup-haproxy-mysql-0 to gke-jen-ps-1292-7efcb1c5-default-pool-a350dfaa-l0gt default-scheduler logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:07 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-f33e6ffc-3c95-4310-98ac-5c74341b2d7a" attachdetach-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:09 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:09 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" in 121ms (121ms including waiting). Image size: 110819445 bytes. 
kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:09 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:09 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:11 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:11 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 86ms (86ms including waiting). Image size: 434738219 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:11 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:11 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:11 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:11 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 94ms (95ms including waiting). Image size: 447419455 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:11 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:11 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:42 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-haproxy-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:42 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-haproxy-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:42 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-haproxy-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-native-chow/datadir-gr-demand-backup-haproxy-mysql-1" pd.csi.storage.gke.io_gke-bc753a67225840b69b19-5d69-1879-vm_eb3ac154-71ad-48db-bec2-b935b4b218fa logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:42 +0000 UTC Normal StatefulSet.apps gr-demand-backup-haproxy-mysql SuccessfulCreate create Claim datadir-gr-demand-backup-haproxy-mysql-1 Pod gr-demand-backup-haproxy-mysql-1 in StatefulSet gr-demand-backup-haproxy-mysql success statefulset-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:42 +0000 UTC Normal StatefulSet.apps gr-demand-backup-haproxy-mysql SuccessfulCreate create Pod gr-demand-backup-haproxy-mysql-1 in StatefulSet gr-demand-backup-haproxy-mysql successful statefulset-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:43 +0000 UTC Normal PodDisruptionBudget.policy gr-demand-backup-haproxy-haproxy NoPods No matching pods found controllermanager logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:43 +0000 UTC Normal StatefulSet.apps gr-demand-backup-haproxy-haproxy SuccessfulCreate create Pod gr-demand-backup-haproxy-haproxy-0 in StatefulSet gr-demand-backup-haproxy-haproxy successful statefulset-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:44 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-native-chow/gr-demand-backup-haproxy-haproxy-0 to gke-jen-ps-1292-7efcb1c5-default-pool-a350dfaa-fnzk default-scheduler logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:44 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:44 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" in 133ms (133ms including waiting). Image size: 110819445 bytes. 
kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:44 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:44 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:45 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-haproxy-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-543c7c32-05a7-4401-b19b-553014d80ea3 pd.csi.storage.gke.io_gke-bc753a67225840b69b19-5d69-1879-vm_eb3ac154-71ad-48db-bec2-b935b4b218fa logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:46 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:46 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 101ms (101ms including waiting). Image size: 103524885 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:46 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:46 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:46 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:46 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 85ms (85ms including waiting). Image size: 103524885 bytes. 
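(Each HAProxy pod above runs the haproxy-init init container followed by two long-lived containers, haproxy and mysql-monit. A sketch, with names taken from this run, that lists them straight from the pod spec:)

    # Print init containers, then regular containers, for the first HAProxy pod.
    kubectl -n kuttl-test-native-chow get pod gr-demand-backup-haproxy-haproxy-0 \
      -o jsonpath='{.spec.initContainers[*].name} {.spec.containers[*].name}{"\n"}'
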
kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:46 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:46 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:46 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1 Binding Scheduled Successfully assigned kuttl-test-native-chow/gr-demand-backup-haproxy-mysql-1 to gke-jen-ps-1292-7efcb1c5-default-pool-a350dfaa-q01s default-scheduler logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:50 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-543c7c32-05a7-4401-b19b-553014d80ea3" attachdetach-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:56 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:56 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" in 107ms (107ms including waiting). Image size: 110819445 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:56 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:56 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:58 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:58 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 86ms (86ms including waiting). Image size: 434738219 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:58 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:58 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:58 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:58 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 101ms (101ms including waiting). Image size: 447419455 bytes. 
kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:58 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:30:58 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:03 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-native-chow/gr-demand-backup-haproxy-haproxy-1 to gke-jen-ps-1292-7efcb1c5-default-pool-a350dfaa-l0gt default-scheduler logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:03 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:03 +0000 UTC Normal StatefulSet.apps gr-demand-backup-haproxy-haproxy SuccessfulCreate create Pod gr-demand-backup-haproxy-haproxy-1 in StatefulSet gr-demand-backup-haproxy-haproxy successful statefulset-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:04 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" in 142ms (142ms including waiting). Image size: 110819445 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:04 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:04 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 124ms (125ms including waiting). Image size: 103524885 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 87ms (87ms including waiting). Image size: 103524885 bytes. 
kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:22 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-native-chow/gr-demand-backup-haproxy-haproxy-2 to gke-jen-ps-1292-7efcb1c5-default-pool-a350dfaa-q01s default-scheduler logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:22 +0000 UTC Normal StatefulSet.apps gr-demand-backup-haproxy-haproxy SuccessfulCreate create Pod gr-demand-backup-haproxy-haproxy-2 in StatefulSet gr-demand-backup-haproxy-haproxy successful statefulset-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:23 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:23 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" in 126ms (126ms including waiting). Image size: 110819445 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:23 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:23 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:25 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:25 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 89ms (89ms including waiting). Image size: 103524885 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:25 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:25 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:25 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:25 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 73ms (73ms including waiting). Image size: 103524885 bytes. 
kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:25 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:25 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:32 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-haproxy-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:32 +0000 UTC Normal StatefulSet.apps gr-demand-backup-haproxy-mysql SuccessfulCreate create Claim datadir-gr-demand-backup-haproxy-mysql-2 Pod gr-demand-backup-haproxy-mysql-2 in StatefulSet gr-demand-backup-haproxy-mysql success statefulset-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:32 +0000 UTC Normal StatefulSet.apps gr-demand-backup-haproxy-mysql SuccessfulCreate create Pod gr-demand-backup-haproxy-mysql-2 in StatefulSet gr-demand-backup-haproxy-mysql successful statefulset-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:33 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-haproxy-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:33 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-haproxy-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-native-chow/datadir-gr-demand-backup-haproxy-mysql-2" pd.csi.storage.gke.io_gke-bc753a67225840b69b19-5d69-1879-vm_eb3ac154-71ad-48db-bec2-b935b4b218fa logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:36 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-haproxy-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-56ff831c-bb1a-4f41-aed2-a85991576207 pd.csi.storage.gke.io_gke-bc753a67225840b69b19-5d69-1879-vm_eb3ac154-71ad-48db-bec2-b935b4b218fa logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:37 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2 Binding Scheduled Successfully assigned kuttl-test-native-chow/gr-demand-backup-haproxy-mysql-2 to gke-jen-ps-1292-7efcb1c5-default-pool-a350dfaa-fnzk default-scheduler logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:41 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-56ff831c-bb1a-4f41-aed2-a85991576207" attachdetach-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:42 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:42 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" in 118ms (118ms including waiting). Image size: 110819445 bytes. 
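(The statefulset-controller events show the mysql StatefulSet scaling out one ordinal at a time — mysql-0, then mysql-1, then mysql-2 — each creating its own datadir claim before the pod is scheduled. A minimal way to watch that rollout by hand, assuming access to the test namespace:)

    # Blocks until all replicas of the mysql StatefulSet are ready.
    kubectl -n kuttl-test-native-chow rollout status statefulset/gr-demand-backup-haproxy-mysql
    # The per-ordinal datadir claims created along the way.
    kubectl -n kuttl-test-native-chow get pvc
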
kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:43 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:43 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:44 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:44 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 78ms (78ms including waiting). Image size: 434738219 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:44 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:44 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:44 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:44 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 98ms (98ms including waiting). Image size: 447419455 bytes. 
kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:44 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:31:44 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:32:22 +0000 UTC Normal PerconaServerMySQL.ps.percona.com gr-demand-backup-haproxy ClusterStateChanged Initializing -> Ready ps-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:32:32 +0000 UTC Normal Pod xb-gr-demand-backup-haproxy-minio-minio-mh7f2 Binding Scheduled Successfully assigned kuttl-test-native-chow/xb-gr-demand-backup-haproxy-minio-minio-mh7f2 to gke-jen-ps-1292-7efcb1c5-default-pool-a350dfaa-fnzk default-scheduler logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:32:32 +0000 UTC Normal Job.batch xb-gr-demand-backup-haproxy-minio-minio SuccessfulCreate Created pod: xb-gr-demand-backup-haproxy-minio-minio-mh7f2 job-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:32:33 +0000 UTC Normal Pod xb-gr-demand-backup-haproxy-minio-minio-mh7f2.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:32:33 +0000 UTC Normal Pod xb-gr-demand-backup-haproxy-minio-minio-mh7f2.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" in 126ms (126ms including waiting). Image size: 110819445 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:32:33 +0000 UTC Normal Pod xb-gr-demand-backup-haproxy-minio-minio-mh7f2.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:32:33 +0000 UTC Normal Pod xb-gr-demand-backup-haproxy-minio-minio-mh7f2.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:32:35 +0000 UTC Normal Pod xb-gr-demand-backup-haproxy-minio-minio-mh7f2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:32:35 +0000 UTC Normal Pod xb-gr-demand-backup-haproxy-minio-minio-mh7f2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 90ms (90ms including waiting). Image size: 447419455 bytes. 
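(The xb-gr-demand-backup-haproxy-minio-minio pod above belongs to the Job the operator creates for an on-demand backup to minio. A hedged sketch for checking the Job and the backup custom resource — the resource name below assumes the conventional lowercase plural of the PerconaServerMySQLBackup kind in the ps.percona.com group seen elsewhere in this log:)

    kubectl -n kuttl-test-native-chow get job xb-gr-demand-backup-haproxy-minio-minio
    # Assumed CRD plural; adjust to your operator version if it differs.
    kubectl -n kuttl-test-native-chow get perconaservermysqlbackups.ps.percona.com
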
kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:32:35 +0000 UTC Normal Pod xb-gr-demand-backup-haproxy-minio-minio-mh7f2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:32:35 +0000 UTC Normal Pod xb-gr-demand-backup-haproxy-minio-minio-mh7f2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:32:41 +0000 UTC Normal Job.batch xb-gr-demand-backup-haproxy-minio-minio Completed Job completed job-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:00 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:00 +0000 UTC Normal StatefulSet.apps gr-demand-backup-haproxy-mysql SuccessfulDelete delete Pod gr-demand-backup-haproxy-mysql-2 in StatefulSet gr-demand-backup-haproxy-mysql successful statefulset-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:01 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:01 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:01 +0000 UTC Normal StatefulSet.apps gr-demand-backup-haproxy-haproxy SuccessfulDelete delete Pod gr-demand-backup-haproxy-haproxy-2 in StatefulSet gr-demand-backup-haproxy-haproxy successful statefulset-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:01 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:01 +0000 UTC Normal PerconaServerMySQL.ps.percona.com gr-demand-backup-haproxy ClusterStateChanged Ready -> Stopping ps-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:02 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:02 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:02 +0000 UTC Normal StatefulSet.apps gr-demand-backup-haproxy-haproxy SuccessfulDelete delete Pod gr-demand-backup-haproxy-haproxy-1 in StatefulSet gr-demand-backup-haproxy-haproxy successful statefulset-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:03 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:03 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:03 +0000 UTC Normal StatefulSet.apps gr-demand-backup-haproxy-haproxy SuccessfulDelete delete Pod gr-demand-backup-haproxy-haproxy-0 in StatefulSet gr-demand-backup-haproxy-haproxy successful statefulset-controller logger.go:42: 09:38:17 
| gr-demand-backup-haproxy | 2026-04-15 09:33:08 +0000 UTC Normal StatefulSet.apps gr-demand-backup-haproxy-mysql SuccessfulDelete delete Pod gr-demand-backup-haproxy-mysql-1 in StatefulSet gr-demand-backup-haproxy-mysql successful statefulset-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:09 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:09 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:17 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:17 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:17 +0000 UTC Normal StatefulSet.apps gr-demand-backup-haproxy-mysql SuccessfulDelete delete Pod gr-demand-backup-haproxy-mysql-0 in StatefulSet gr-demand-backup-haproxy-mysql successful statefulset-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:17 +0000 UTC Normal PerconaServerMySQL.ps.percona.com gr-demand-backup-haproxy ClusterStateChanged Stopping -> Paused ps-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:21 +0000 UTC Warning Pod gr-demand-backup-haproxy-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/04/15 09:33:21 MySQL state is not ready... kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:30 +0000 UTC Normal Pod xb-restore-gr-demand-backup-haproxy-restore-minio-jhm5f Binding Scheduled Successfully assigned kuttl-test-native-chow/xb-restore-gr-demand-backup-haproxy-restore-minio-jhm5f to gke-jen-ps-1292-7efcb1c5-default-pool-a350dfaa-fnzk default-scheduler logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:30 +0000 UTC Warning Pod xb-restore-gr-demand-backup-haproxy-restore-minio-jhm5f FailedAttachVolume Multi-Attach error for volume "pvc-f33e6ffc-3c95-4310-98ac-5c74341b2d7a" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:30 +0000 UTC Normal Job.batch xb-restore-gr-demand-backup-haproxy-restore-minio SuccessfulCreate Created pod: xb-restore-gr-demand-backup-haproxy-restore-minio-jhm5f job-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:44 +0000 UTC Normal Pod xb-restore-gr-demand-backup-haproxy-restore-minio-jhm5f SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-f33e6ffc-3c95-4310-98ac-5c74341b2d7a" attachdetach-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:46 +0000 UTC Normal Pod xb-restore-gr-demand-backup-haproxy-restore-minio-jhm5f.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:46 +0000 UTC Normal Pod xb-restore-gr-demand-backup-haproxy-restore-minio-jhm5f.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" in 101ms (101ms 
including waiting). Image size: 110819445 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:46 +0000 UTC Normal Pod xb-restore-gr-demand-backup-haproxy-restore-minio-jhm5f.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:46 +0000 UTC Normal Pod xb-restore-gr-demand-backup-haproxy-restore-minio-jhm5f.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:47 +0000 UTC Normal Pod xb-restore-gr-demand-backup-haproxy-restore-minio-jhm5f.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:47 +0000 UTC Normal Pod xb-restore-gr-demand-backup-haproxy-restore-minio-jhm5f.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 104ms (104ms including waiting). Image size: 447419455 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:47 +0000 UTC Normal Pod xb-restore-gr-demand-backup-haproxy-restore-minio-jhm5f.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:47 +0000 UTC Normal Pod xb-restore-gr-demand-backup-haproxy-restore-minio-jhm5f.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:58 +0000 UTC Normal Job.batch xb-restore-gr-demand-backup-haproxy-restore-minio Completed Job completed job-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:59 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0 Binding Scheduled Successfully assigned kuttl-test-native-chow/gr-demand-backup-haproxy-mysql-0 to gke-jen-ps-1292-7efcb1c5-default-pool-a350dfaa-q01s default-scheduler logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:59 +0000 UTC Warning Pod gr-demand-backup-haproxy-mysql-0 FailedAttachVolume Multi-Attach error for volume "pvc-f33e6ffc-3c95-4310-98ac-5c74341b2d7a" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:33:59 +0000 UTC Normal PerconaServerMySQL.ps.percona.com gr-demand-backup-haproxy ClusterStateChanged Paused -> Initializing ps-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:16 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-f33e6ffc-3c95-4310-98ac-5c74341b2d7a" attachdetach-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:17 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:17 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" in 137ms (137ms including waiting). Image size: 110819445 bytes. 
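(The Multi-Attach warnings above are expected with ReadWriteOnce PD volumes: the restore Job, and later the re-created mysql-0 pod, must wait for the volume to detach from its previous node before it can attach to the new one — which is exactly the gap between each FailedAttachVolume and its SuccessfulAttachVolume. A hypothetical way to watch the attachment hand-off for this run's volume:)

    # VolumeAttachment objects record which node currently holds the PD.
    kubectl get volumeattachment | grep pvc-f33e6ffc-3c95-4310-98ac-5c74341b2d7a
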
kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:17 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:17 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:19 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:19 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 85ms (85ms including waiting). Image size: 434738219 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:19 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:19 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:19 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:19 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 97ms (97ms including waiting). Image size: 447419455 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:19 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:19 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:50 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-haproxy-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:51 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-haproxy-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:51 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-haproxy-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-native-chow/datadir-gr-demand-backup-haproxy-mysql-1" pd.csi.storage.gke.io_gke-bc753a67225840b69b19-5d69-1879-vm_eb3ac154-71ad-48db-bec2-b935b4b218fa logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:52 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-native-chow/gr-demand-backup-haproxy-haproxy-0 to gke-jen-ps-1292-7efcb1c5-default-pool-a350dfaa-fnzk default-scheduler logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:52 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:53 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" in 116ms (116ms including waiting). Image size: 110819445 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:53 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:53 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:54 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-haproxy-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-62346574-f9d9-41ff-b5fe-e64f5bce33ab pd.csi.storage.gke.io_gke-bc753a67225840b69b19-5d69-1879-vm_eb3ac154-71ad-48db-bec2-b935b4b218fa logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:54 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:54 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 107ms (107ms including waiting). Image size: 103524885 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:54 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:54 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:54 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:54 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 106ms (106ms including waiting). Image size: 103524885 bytes. 
kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:54 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:55 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:55 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1 Binding Scheduled Successfully assigned kuttl-test-native-chow/gr-demand-backup-haproxy-mysql-1 to gke-jen-ps-1292-7efcb1c5-default-pool-a350dfaa-l0gt default-scheduler logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:34:59 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-62346574-f9d9-41ff-b5fe-e64f5bce33ab" attachdetach-controller logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" in 118ms (118ms including waiting). Image size: 110819445 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:07 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:07 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 95ms (95ms including waiting). Image size: 434738219 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:07 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:07 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:07 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:07 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 90ms (90ms including waiting). Image size: 447419455 bytes. 
kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:08 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:08 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:11 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-native-chow/gr-demand-backup-haproxy-haproxy-1 to gke-jen-ps-1292-7efcb1c5-default-pool-a350dfaa-l0gt default-scheduler logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:12 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:12 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" in 100ms (100ms including waiting). Image size: 110819445 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:12 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:12 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:14 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:14 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 101ms (101ms including waiting). Image size: 103524885 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:14 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:14 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:14 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:15 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 100ms (100ms including waiting). Image size: 103524885 bytes. 
kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:15 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:15 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:31 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-native-chow/gr-demand-backup-haproxy-haproxy-2 to gke-jen-ps-1292-7efcb1c5-default-pool-a350dfaa-q01s default-scheduler logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:32 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:32 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" in 136ms (136ms including waiting). Image size: 110819445 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:32 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:32 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:34 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:34 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 93ms (93ms including waiting). Image size: 103524885 bytes. kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:34 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:34 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:34 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:34 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 97ms (97ms including waiting). Image size: 103524885 bytes. 
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:41 +0000 UTC Warning Pod gr-demand-backup-haproxy-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed:
2026/04/15 09:35:25 Waiting for MySQL ready state
2026/04/15 09:35:25 MySQL is ready
2026/04/15 09:35:25 Starting bootstrap...
2026/04/15 09:35:26 mysql-shell version: 8.4.6
2026/04/15 09:35:26 Clearing any stale group_replication_group_seeds
2026/04/15 09:35:26 Running dba.configureInstance('operator:*****@gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow')
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
Configuring local MySQL instance listening at port 3306 for use in an InnoDB Cluster...
This instance reports its own address as gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306
applierWorkerThreads will be set to the default value of 4.
Disabled super_read_only on the instance 'gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306'
Enabling super_read_only on the instance 'gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306'
The instance 'gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306' is valid for InnoDB Cluster usage.
Successfully enabled parallel appliers.
2026/04/15 09:35:27 Instance (gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow) configured to join to the InnoDB cluster
2026/04/15 09:35:27 peers: [gr-demand-backup-haproxy-mysql-0.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow]
2026/04/15 09:35:27 Running dba.getCluster('grdemandbackuphaproxy')
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
2026/04/15 09:35:27 Connected to peer gr-demand-backup-haproxy-mysql-0.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow
2026/04/15 09:35:28 Cluster status:
  ClusterName: grdemandbackuphaproxy
  Status: OK_NO_TOLERANCE
  StatusText: Cluster is NOT tolerant to any failures.
  SSL: REQUIRED
  Primary: gr-demand-backup-haproxy-mysql-0.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306
  Topology:
    Member 0
      Address: gr-demand-backup-haproxy-mysql-0.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306
      State: ONLINE
      Errors: []
2026/04/15 09:35:28 Primary is gr-demand-backup-haproxy-mysql-0.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306
2026/04/15 09:35:28 Primary GTID_EXECUTED=47782aa8-38ae-11f1-b2d1-b2cf751c8914:1-5, 53c82d18-38ae-11f1-b2d1-b2cf751c8914:1-64, b36caf7f-38ad-11f1-aa8b-4e44fdc82a26:1-4, bfd3d2b0-38ad-11f1-af4e-4e44fdc82a26:1-83
2026/04/15 09:35:29 Primary GTID_PURGED=b36caf7f-38ad-11f1-aa8b-4e44fdc82a26:1-4, bfd3d2b0-38ad-11f1-af4e-4e44fdc82a26:1-83
2026/04/15 09:35:30 Replica GTID_EXECUTED=
2026/04/15 09:35:30 Adding instance (gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow) to InnoDB cluster using clone recovery
2026/04/15 09:35:30 Running dba.getCluster('grdemandbackuphaproxy').addInstance('operator:*****@gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow', {'recoveryMethod': 'clone', 'recoveryProgress': 1})
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
NOTE: A GTID set check of the MySQL instance at 'gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306' determined that it is missing transactions that were purged from all cluster members.
NOTE: The target instance 'gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306' has not been pre-provisioned (GTID set is empty). The Shell is unable to determine whether the instance has pre-existing data that would be overwritten with clone based recovery.
Clone based recovery selected through the recoveryMethod option
Validating instance configuration at gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306...
This instance reports its own address as gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306
Instance configuration is suitable.
NOTE: Group Replication will communicate with other members using 'gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306'. Use the localAddress option to override.
* Checking connectivity and SSL configuration...
A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours.
Adding instance to the cluster...
Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background.
Clone based state recovery is now in progress.
NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back.
* Waiting for clone to finish...
NOTE: gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306 is being cloned from gr-demand-backup-haproxy-mysql-0.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306
** Stage DROP DATA: Completed
** Stage FILE COPY: Completed
** Stage PAGE COPY: Completed
** Stage REDO COPY: Completed
NOTE: gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306 is shutting down...
* Waiting for server restart... \
* Waiting for server restart... |
* Waiting for server restart... / kubelet
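The Unhealthy event above is not a cluster failure: the operator's bootstrap is joining mysql-1 through MySQL Shell's AdminAPI with clone recovery, and the clone-triggered server restart is what trips the startup probe. A hedged sketch of the same AdminAPI call run by hand, assuming network access to the pods and that mysqlsh prompts for the operator password (connection details are illustrative, not the operator's exact invocation):

    # Reproduce the addInstance call the bootstrap log shows, from any client
    mysqlsh --js \
      --uri operator@gr-demand-backup-haproxy-mysql-0.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306 \
      -e "dba.getCluster('grdemandbackuphaproxy').addInstance('operator@gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow', {recoveryMethod: 'clone', recoveryProgress: 1})"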
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:41 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:41 +0000 UTC Warning Pod gr-demand-backup-haproxy-mysql-1.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:35:41 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 81ms (81ms including waiting). Image size: 434738219 bytes. kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:36:15 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-haproxy-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:36:15 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-haproxy-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:36:15 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-haproxy-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-native-chow/datadir-gr-demand-backup-haproxy-mysql-2" pd.csi.storage.gke.io_gke-bc753a67225840b69b19-5d69-1879-vm_eb3ac154-71ad-48db-bec2-b935b4b218fa
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:36:19 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-haproxy-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-58003ffc-6faa-4e44-9cf0-d2d0c9bd725f pd.csi.storage.gke.io_gke-bc753a67225840b69b19-5d69-1879-vm_eb3ac154-71ad-48db-bec2-b935b4b218fa
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:36:19 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2 Binding Scheduled Successfully assigned kuttl-test-native-chow/gr-demand-backup-haproxy-mysql-2 to gke-jen-ps-1292-7efcb1c5-default-pool-a350dfaa-fnzk default-scheduler
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:36:24 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-58003ffc-6faa-4e44-9cf0-d2d0c9bd725f" attachdetach-controller
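The WaitForFirstConsumer event is expected rather than a delay bug: the claim stays pending until the scheduler places gr-demand-backup-haproxy-mysql-2, because the StorageClass defers volume binding until a consuming pod exists. A quick check, assuming the claim uses GKE's usual default class standard-rwo (an assumption; substitute whatever class the PVC actually references):

    # Should print: pd.csi.storage.gke.io WaitForFirstConsumer
    kubectl get storageclass standard-rwo \
      -o jsonpath='{.provisioner}{" "}{.volumeBindingMode}{"\n"}'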
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:36:28 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:36:28 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1292-7efcb1c5" in 110ms (111ms including waiting). Image size: 110819445 bytes. kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:36:28 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:36:28 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:36:29 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:36:29 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 102ms (102ms including waiting). Image size: 434738219 bytes. kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:36:29 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:36:30 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:36:30 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:36:30 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 122ms (122ms including waiting). Image size: 447419455 bytes. kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:36:30 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:36:30 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:37:05 +0000 UTC Warning Pod gr-demand-backup-haproxy-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed:
2026/04/15 09:36:48 Waiting for MySQL ready state
2026/04/15 09:36:48 MySQL is ready
2026/04/15 09:36:48 Starting bootstrap...
2026/04/15 09:36:48 mysql-shell version: 8.4.6
2026/04/15 09:36:48 Clearing any stale group_replication_group_seeds
2026/04/15 09:36:49 Running dba.configureInstance('operator:*****@gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow')
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
Configuring local MySQL instance listening at port 3306 for use in an InnoDB Cluster...
This instance reports its own address as gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306
applierWorkerThreads will be set to the default value of 4.
Disabled super_read_only on the instance 'gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306'
Enabling super_read_only on the instance 'gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306'
The instance 'gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306' is valid for InnoDB Cluster usage.
Successfully enabled parallel appliers.
2026/04/15 09:36:49 Instance (gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow) configured to join to the InnoDB cluster
2026/04/15 09:36:49 peers: [gr-demand-backup-haproxy-mysql-0.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow]
2026/04/15 09:36:49 Running dba.getCluster('grdemandbackuphaproxy')
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
2026/04/15 09:36:50 Connected to peer gr-demand-backup-haproxy-mysql-0.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow
2026/04/15 09:36:51 Cluster status:
  ClusterName: grdemandbackuphaproxy
  Status: OK_NO_TOLERANCE
  StatusText: Cluster is NOT tolerant to any failures.
  SSL: REQUIRED
  Primary: gr-demand-backup-haproxy-mysql-0.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306
  Topology:
    Member 0
      Address: gr-demand-backup-haproxy-mysql-0.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306
      State: ONLINE
      Errors: []
    Member 1
      Address: gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306
      State: ONLINE
      Errors: []
2026/04/15 09:36:51 Primary is gr-demand-backup-haproxy-mysql-0.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306
2026/04/15 09:36:51 Primary GTID_EXECUTED=47782aa8-38ae-11f1-b2d1-b2cf751c8914:1-5, 53c82d18-38ae-11f1-b2d1-b2cf751c8914:1-74, b36caf7f-38ad-11f1-aa8b-4e44fdc82a26:1-4, bfd3d2b0-38ad-11f1-af4e-4e44fdc82a26:1-83
2026/04/15 09:36:52 Primary GTID_PURGED=b36caf7f-38ad-11f1-aa8b-4e44fdc82a26:1-4, bfd3d2b0-38ad-11f1-af4e-4e44fdc82a26:1-83
2026/04/15 09:36:52 Replica GTID_EXECUTED=
2026/04/15 09:36:53 Adding instance (gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow) to InnoDB cluster using clone recovery
2026/04/15 09:36:53 Running dba.getCluster('grdemandbackuphaproxy').addInstance('operator:*****@gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow', {'recoveryMethod': 'clone', 'recoveryProgress': 1})
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
NOTE: A GTID set check of the MySQL instance at 'gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306' determined that it is missing transactions that were purged from all cluster members.
NOTE: The target instance 'gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306' has not been pre-provisioned (GTID set is empty). The Shell is unable to determine whether the instance has pre-existing data that would be overwritten with clone based recovery.
Clone based recovery selected through the recoveryMethod option
Validating instance configuration at gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306...
This instance reports its own address as gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306
Instance configuration is suitable.
NOTE: Group Replication will communicate with other members using 'gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306'. Use the localAddress option to override.
* Checking connectivity and SSL configuration...
A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours.
Adding instance to the cluster...
Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background.
Clone based state recovery is now in progress.
NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back.
* Waiting for clone to finish...
NOTE: gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306 is being cloned from gr-demand-backup-haproxy-mysql-1.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306
** Stage DROP DATA: Completed
** Stage FILE COPY: Completed
** Stage PAGE COPY: Completed
** Stage REDO COPY: Completed
NOTE: gr-demand-backup-haproxy-mysql-2.gr-demand-backup-haproxy-mysql.kuttl-test-native-chow:3306 is shutting down...
* Waiting for server restart... \
* Waiting for server restart... |
* Waiting for server restart... /
* Waiting for server restart... - kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:37:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:37:05 +0000 UTC Warning Pod gr-demand-backup-haproxy-mysql-2.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:37:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 96ms (96ms including waiting). Image size: 434738219 bytes. kubelet
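mysql-2 repeats the pattern seen on mysql-1: clone recovery restarts mysqld, the restart outlasts the startup probe window, and the kubelet kills and restarts the container, after which the member joins cleanly. To see the probe settings that were exceeded, one option is to read the startupProbe off the live pod (pod name and namespace from this run; the jsonpath filter selects the mysql container):

    # Print the mysql container's startup probe settings
    kubectl -n kuttl-test-native-chow get pod gr-demand-backup-haproxy-mysql-2 \
      -o jsonpath='{.spec.containers[?(@.name=="mysql")].startupProbe}'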
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:38:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:38:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:38:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:38:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:38:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:38:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:38:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:38:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:38:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:38:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:38:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:38:05 +0000 UTC Normal Pod gr-demand-backup-haproxy-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:38:08 +0000 UTC Warning Pod gr-demand-backup-haproxy-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/04/15 09:38:08 MySQL state is not ready... kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:38:10 +0000 UTC Warning Pod gr-demand-backup-haproxy-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/04/15 09:38:10 MySQL state is not ready... kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:38:13 +0000 UTC Warning Pod gr-demand-backup-haproxy-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/04/15 09:38:13 MySQL state is not ready... kubelet
logger.go:42: 09:38:17 | gr-demand-backup-haproxy | 2026-04-15 09:38:15 +0000 UTC Warning Pod gr-demand-backup-haproxy-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/04/15 09:38:15 MySQL state is not ready... kubelet
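All of the Killing and "MySQL state is not ready" events above belong to the test's teardown, not to a failure. When triaging a run like this, filtering to Warning events helps separate genuine problems from expected shutdown noise (namespace from this run):

    # Show only Warning events, oldest first
    kubectl -n kuttl-test-native-chow get events \
      --field-selector type=Warning --sort-by=.lastTimestamp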
logger.go:42: 09:38:18 | gr-demand-backup-haproxy | Deleting namespace "kuttl-test-native-chow"
=== NAME  kuttl
    harness.go:404: run tests finished
    harness.go:511: cleaning up
    harness.go:568: removing temp folder: ""
--- PASS: kuttl (583.34s)
    --- PASS: kuttl/harness (0.00s)
        --- PASS: kuttl/harness/gr-demand-backup-haproxy (582.60s)
PASS
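To reproduce this result, the single test can be re-run against the configured kubeconfig with the kuttl plugin; the path and timeout below mirror this run, but treat the invocation as a sketch of the kuttl CLI rather than the harness's own wrapper:

    # Re-run only gr-demand-backup-haproxy with the same 180s step timeout
    kubectl kuttl test e2e-tests/tests --test gr-demand-backup-haproxy --timeout 180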