=== RUN kuttl
harness.go:460: starting setup
harness.go:258: running tests using configured kubeconfig.
harness.go:281: Successful connection to cluster at: https://34.10.104.151
harness.go:366: running tests
harness.go:77: going to run test suite with timeout of 180 seconds for each step
harness.go:378: testsuite: e2e-tests/tests has 53 tests
=== RUN kuttl/harness
=== RUN kuttl/harness/demand-backup
=== PAUSE kuttl/harness/demand-backup
=== CONT kuttl/harness/demand-backup
logger.go:42: 15:24:51 | demand-backup | Ignoring "conf": does not begin with a number followed by a dash.
logger.go:42: 15:24:51 | demand-backup | Creating namespace "kuttl-test-fancy-weevil"
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | starting test step 0-deploy-operator
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | running command: [sh -c set -o errexit
    set -o xtrace
    source ../../functions
    init_temp_dir # do this only in the first TestStep
    apply_s3_storage_secrets
    deploy_operator
    deploy_tls_cluster_secrets
    deploy_client
    if has_minio_storage; then
        deploy_minio
    fi]
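Every numbered step in this suite opens with the same shell preamble, which is worth decoding once before reading the trace that follows. The `+`, `++`, `+++` prefixes that dominate the rest of this log are bash xtrace output; more plus signs mean deeper command-substitution or sourcing nesting. A minimal sketch of the shared pattern, with helper names taken from the step script above:

    # Shape of every kuttl TestStep script in this suite: fail fast, trace everything.
    set -o errexit          # any failing command fails the whole test step
    set -o xtrace           # produces the '+' lines below; more '+' = deeper nesting
    source ../../functions  # shared helpers: deploy_operator, deploy_minio, run_mysql, ...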
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | + source ../../functions
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ realpath ../../..
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | ++++ pwd
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | ++ test_name=demand-backup
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/vars.sh
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export GIT_BRANCH=PR-1238
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ GIT_BRANCH=PR-1238
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export VERSION=PR-1238-7677a7b6
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ VERSION=PR-1238-7677a7b6
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1238-7677a7b6
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1238-7677a7b6
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ [[ -z 8.0 ]]
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export MYSQL_VERSION=8.0
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ MYSQL_VERSION=8.0
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export IMAGE_BINLOG_SERVER=perconalab/percona-binlog-server:0.2.1
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ IMAGE_BINLOG_SERVER=perconalab/percona-binlog-server:0.2.1
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export CERT_MANAGER_VER=1.20.2
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ CERT_MANAGER_VER=1.20.2
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export MINIO_VER=5.4.0
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ MINIO_VER=5.4.0
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export VAULT_VER=0.16.1
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ VAULT_VER=0.16.1
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | ++++ which gdate
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | ++++ which date
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ export date=/usr/sbin/date
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ date=/usr/sbin/date
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ oc get projects
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ :
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ kubectl get nodes
logger.go:42: 15:24:51 | demand-backup/0-deploy-operator | +++ grep '^minikube'
logger.go:42: 15:24:53 | demand-backup/0-deploy-operator | +++ which gsed
logger.go:42: 15:24:53 | demand-backup/0-deploy-operator | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 15:24:53 | demand-backup/0-deploy-operator | +++ which sed
logger.go:42: 15:24:53 | demand-backup/0-deploy-operator | ++ sed=/usr/sbin/sed
logger.go:42: 15:24:53 | demand-backup/0-deploy-operator | + init_temp_dir
logger.go:42: 15:24:53 | demand-backup/0-deploy-operator | + rm -rf /tmp/kuttl/ps/demand-backup
logger.go:42: 15:24:53 | demand-backup/0-deploy-operator | + mkdir -p /tmp/kuttl/ps/demand-backup
logger.go:42: 15:24:53 | demand-backup/0-deploy-operator | + apply_s3_storage_secrets
logger.go:42: 15:24:53 | demand-backup/0-deploy-operator | + apply_minio_secret
logger.go:42: 15:24:53 | demand-backup/0-deploy-operator | + kubectl -n kuttl-test-fancy-weevil apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf/minio-secret.yml
logger.go:42: 15:24:54 | demand-backup/0-deploy-operator | secret/minio-secret created
logger.go:42: 15:24:54 | demand-backup/0-deploy-operator | + kubectl -n kuttl-test-fancy-weevil apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf/cloud-secret.yml
logger.go:42: 15:24:55 | demand-backup/0-deploy-operator | secret/aws-s3-secret created
logger.go:42: 15:24:56 | demand-backup/0-deploy-operator | secret/do-spaces-secret created
logger.go:42: 15:24:56 | demand-backup/0-deploy-operator | secret/gcp-cs-secret created
logger.go:42: 15:24:57 | demand-backup/0-deploy-operator | secret/azure-secret created
logger.go:42: 15:24:57 | demand-backup/0-deploy-operator | + deploy_operator
logger.go:42: 15:24:57 | demand-backup/0-deploy-operator | + destroy_operator
logger.go:42: 15:24:57 | demand-backup/0-deploy-operator | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
logger.go:42: 15:24:57 | demand-backup/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 15:24:57 | demand-backup/0-deploy-operator | Error from server (NotFound): deployments.apps "percona-server-mysql-operator" not found
logger.go:42: 15:24:57 | demand-backup/0-deploy-operator | + true
logger.go:42: 15:24:57 | demand-backup/0-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 15:24:57 | demand-backup/0-deploy-operator | + kubectl delete namespace ps-operator --force --grace-period=0
logger.go:42: 15:24:57 | demand-backup/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 15:24:58 | demand-backup/0-deploy-operator | Error from server (NotFound): namespaces "ps-operator" not found
logger.go:42: 15:24:58 | demand-backup/0-deploy-operator | + true
logger.go:42: 15:24:58 | demand-backup/0-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 15:24:58 | demand-backup/0-deploy-operator | + create_namespace ps-operator
logger.go:42: 15:24:58 | demand-backup/0-deploy-operator | + local namespace=ps-operator
logger.go:42: 15:24:58 | demand-backup/0-deploy-operator | + [[ -n '' ]]
logger.go:42: 15:24:58 | demand-backup/0-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found
logger.go:42: 15:24:58 | demand-backup/0-deploy-operator | + kubectl wait --for=delete namespace ps-operator
logger.go:42: 15:24:59 | demand-backup/0-deploy-operator | + kubectl create namespace ps-operator
logger.go:42: 15:24:59 | demand-backup/0-deploy-operator | namespace/ps-operator created
logger.go:42: 15:24:59 | demand-backup/0-deploy-operator | + apply_crd
logger.go:42: 15:24:59 | demand-backup/0-deploy-operator | + kubectl -n ps-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy/crd.yaml
logger.go:42: 15:25:00 | demand-backup/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
logger.go:42: 15:25:00 | demand-backup/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
logger.go:42: 15:25:02 | demand-backup/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
logger.go:42: 15:25:02 | demand-backup/0-deploy-operator | + apply_rbac
logger.go:42: 15:25:02 | demand-backup/0-deploy-operator | + local rbac_file
logger.go:42: 15:25:02 | demand-backup/0-deploy-operator | + '[' -n ps-operator ']'
logger.go:42: 15:25:02 | demand-backup/0-deploy-operator | + rbac_file=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy/cw-rbac.yaml
logger.go:42: 15:25:02 | demand-backup/0-deploy-operator | + kubectl -n ps-operator apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy/cw-rbac.yaml
logger.go:42: 15:25:03 | demand-backup/0-deploy-operator | serviceaccount/percona-server-mysql-operator created
logger.go:42: 15:25:04 | demand-backup/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 15:25:04 | demand-backup/0-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator created
logger.go:42: 15:25:05 | demand-backup/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 15:25:05 | demand-backup/0-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator created
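Note that the CRDs go in with server-side apply rather than a plain apply. A condensed sketch of the pattern traced above (paths shortened from the log); `--force-conflicts` lets a PR build take ownership of fields still managed by a previously installed operator version, which keeps re-runs against a dirty cluster idempotent:

    # Idempotent CRD + RBAC install, as traced above.
    kubectl -n ps-operator apply --server-side --force-conflicts -f deploy/crd.yaml
    # cw-rbac.yaml (cluster-wide RBAC) is selected because OPERATOR_NS is set:
    # the operator runs in ps-operator and watches the test namespace.
    kubectl -n ps-operator apply -f deploy/cw-rbac.yaml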
logger.go:42: 15:25:05 | demand-backup/0-deploy-operator | + local operator_file
logger.go:42: 15:25:05 | demand-backup/0-deploy-operator | + '[' -n ps-operator ']'
logger.go:42: 15:25:05 | demand-backup/0-deploy-operator | + operator_file=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy/cw-operator.yaml
logger.go:42: 15:25:05 | demand-backup/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"'
logger.go:42: 15:25:05 | demand-backup/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "VERBOSE"'
logger.go:42: 15:25:05 | demand-backup/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-1238-7677a7b6
logger.go:42: 15:25:05 | demand-backup/0-deploy-operator | + kubectl -n ps-operator apply -f -
logger.go:42: 15:25:05 | demand-backup/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-1238-7677a7b6"' /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy/cw-operator.yaml
logger.go:42: 15:25:07 | demand-backup/0-deploy-operator | configmap/percona-server-mysql-operator-config created
logger.go:42: 15:25:07 | demand-backup/0-deploy-operator | deployment.apps/percona-server-mysql-operator created
logger.go:42: 15:25:07 | demand-backup/0-deploy-operator | + deploy_tls_cluster_secrets
logger.go:42: 15:25:07 | demand-backup/0-deploy-operator | + kubectl -n kuttl-test-fancy-weevil apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf/ssl-secret.yaml
logger.go:42: 15:25:08 | demand-backup/0-deploy-operator | secret/test-ssl created
logger.go:42: 15:25:08 | demand-backup/0-deploy-operator | + deploy_client
logger.go:42: 15:25:08 | demand-backup/0-deploy-operator | + kubectl -n kuttl-test-fancy-weevil apply -f -
logger.go:42: 15:25:08 | demand-backup/0-deploy-operator | ++ printf '.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 15:25:08 | demand-backup/0-deploy-operator | + yq eval '.spec.containers[0].image="perconalab/percona-server-mysql-operator:main-psmysql8.0"' /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf/client.yaml
logger.go:42: 15:25:09 | demand-backup/0-deploy-operator | pod/mysql-client created
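The operator manifest is never templated on disk; deploy_operator rewrites cw-operator.yaml in-stream with yq and pipes the result straight to kubectl. A condensed sketch of the pipeline traced above (documentIndex==1 selects the Deployment inside the multi-document manifest; the assignments leave the other documents untouched, which is why the ConfigMap is created too):

    IMAGE="perconalab/percona-server-mysql-operator:PR-1238-7677a7b6"

    # Disable telemetry, raise log verbosity, and pin the PR image,
    # then apply the whole (still multi-document) manifest.
    yq eval '(select(documentIndex==1).spec.template.spec.containers[]
        | select(.name=="manager").env[]
        | select(.name=="DISABLE_TELEMETRY").value) = "true"' deploy/cw-operator.yaml \
        | yq eval '(select(documentIndex==1).spec.template.spec.containers[]
            | select(.name=="manager").env[]
            | select(.name=="LOG_LEVEL").value) = "VERBOSE"' - \
        | yq eval "select(documentIndex==1).spec.template.spec.containers[0].image=\"${IMAGE}\"" - \
        | kubectl -n ps-operator apply -f -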
logger.go:42: 15:25:09 | demand-backup/0-deploy-operator | + has_minio_storage
logger.go:42: 15:25:09 | demand-backup/0-deploy-operator | + local name_suffix=
logger.go:42: 15:25:09 | demand-backup/0-deploy-operator | + local cr_name=demand-backup
logger.go:42: 15:25:09 | demand-backup/0-deploy-operator | ++ get_test_cr demand-backup
logger.go:42: 15:25:09 | demand-backup/0-deploy-operator | ++ local cr_name=demand-backup
logger.go:42: 15:25:09 | demand-backup/0-deploy-operator | +++ detect_k8s_platform
logger.go:42: 15:25:09 | demand-backup/0-deploy-operator | +++ set +x
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | +++ echo gke
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | ++ local platform=gke
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | +++ get_platform_alias gke
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | +++ set +x
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | +++ echo gke
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | ++ platform_alias=gke
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | +++ get_storage_alias gke
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | +++ set +x
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | +++ echo default
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | ++ storage_alias=default
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | ++ local default_cr=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup.yaml
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | ++ local platform_cr=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup-gke.yaml
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | ++ local storage_cr=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup-default.yaml
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | ++ [[ -n /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup-default.yaml ]]
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | ++ [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup-default.yaml ]]
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | ++ [[ -n /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup-gke.yaml ]]
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | ++ [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup-gke.yaml ]]
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | ++ [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup.yaml ]]
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | ++ echo /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup.yaml
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | + local cr=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup.yaml
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | + [[ -z /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup.yaml ]]
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | ++ yq eval $'\n\t\t(.spec.backup.storages // {})\n\t\t| keys\n\t\t| map(select(test("^minio")))\n\t\t| length > 0\n\t' /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup.yaml
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | + local has_minio=true
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | + [[ true == true ]]
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | + echo 'MinIO enabled in /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup.yaml'
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | MinIO enabled in /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup.yaml
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | + return 0
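The MinIO decision is driven by the test CR itself rather than a hardcoded list of tests: has_minio_storage evaluates the ANSI-C-quoted yq expression shown in the trace, which is easier to read unescaped:

    # True when at least one key under .spec.backup.storages starts with "minio".
    yq eval '
        (.spec.backup.storages // {})
        | keys
        | map(select(test("^minio")))
        | length > 0
    ' e2e-tests/tests/demand-backup/conf/demand-backup.yaml

The `// {}` fallback keeps the check from erroring on CRs that define no backup storages at all.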
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | + deploy_minio
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | + local storage=2G
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | + local access_key
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | + local secret_key
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | ++ kubectl -n kuttl-test-fancy-weevil get secret minio-secret -o 'jsonpath={.data.AWS_ACCESS_KEY_ID}'
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | ++ base64 -d
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | + access_key=some-access-key
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | ++ kubectl -n kuttl-test-fancy-weevil get secret minio-secret -o 'jsonpath={.data.AWS_SECRET_ACCESS_KEY}'
logger.go:42: 15:25:10 | demand-backup/0-deploy-operator | ++ base64 -d
logger.go:42: 15:25:11 | demand-backup/0-deploy-operator | + secret_key=some-secret-key
logger.go:42: 15:25:11 | demand-backup/0-deploy-operator | + helm uninstall -n kuttl-test-fancy-weevil minio-service
logger.go:42: 15:25:11 | demand-backup/0-deploy-operator | Error: uninstall: Release not loaded: minio-service: release: not found
logger.go:42: 15:25:11 | demand-backup/0-deploy-operator | + :
logger.go:42: 15:25:11 | demand-backup/0-deploy-operator | + helm repo remove minio
logger.go:42: 15:25:12 | demand-backup/0-deploy-operator | Error: no repositories configured
logger.go:42: 15:25:12 | demand-backup/0-deploy-operator | + :
logger.go:42: 15:25:12 | demand-backup/0-deploy-operator | + helm repo add minio https://charts.min.io/
logger.go:42: 15:25:12 | demand-backup/0-deploy-operator | "minio" has been added to your repositories
logger.go:42: 15:25:12 | demand-backup/0-deploy-operator | +++ printf %q some-access-key
logger.go:42: 15:25:12 | demand-backup/0-deploy-operator | ++ printf %q some-access-key
logger.go:42: 15:25:12 | demand-backup/0-deploy-operator | +++ printf %q some-secret-key
logger.go:42: 15:25:12 | demand-backup/0-deploy-operator | ++ printf %q some-secret-key
logger.go:42: 15:25:12 | demand-backup/0-deploy-operator | + retry 10 60 helm install minio-service -n kuttl-test-fancy-weevil --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio
logger.go:42: 15:25:12 | demand-backup/0-deploy-operator | + local max=10
logger.go:42: 15:25:12 | demand-backup/0-deploy-operator | + local delay=60
logger.go:42: 15:25:12 | demand-backup/0-deploy-operator | + shift 2
logger.go:42: 15:25:12 | demand-backup/0-deploy-operator | + local n=1
logger.go:42: 15:25:12 | demand-backup/0-deploy-operator | + helm install minio-service -n kuttl-test-fancy-weevil --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator | NAME: minio-service
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator | LAST DEPLOYED: Tue May 5 15:25:13 2026
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator | NAMESPACE: kuttl-test-fancy-weevil
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator | STATUS: deployed
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator | REVISION: 1
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator | TEST SUITE: None
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator | NOTES:
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator | MinIO can be accessed via port 9000 on the following DNS name from within your cluster:
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator | minio-service.kuttl-test-fancy-weevil.cluster.local
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator |
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator | To access MinIO from localhost, run the below commands:
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator |
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator | 1. export POD_NAME=$(kubectl get pods --namespace kuttl-test-fancy-weevil -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}")
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator |
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator | 2. kubectl port-forward $POD_NAME 9000 --namespace kuttl-test-fancy-weevil
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator |
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator | Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator |
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator | You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client:
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator |
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator | 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator |
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator | 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace kuttl-test-fancy-weevil minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace kuttl-test-fancy-weevil minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator |
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator | 3. mc ls minio-service-local
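helm install runs wrapped in `retry 10 60 ...`: up to 10 attempts, 60 seconds apart, which is what absorbs slow chart downloads and flaky cluster admission in CI (only one `+ helm install` line appears above, so the first attempt succeeded; the gap until 15:25:42 is helm itself waiting). Only the helper's locals (max, delay, n) show up in the trace, so the loop body below is an assumed reconstruction of the usual shape of such a wrapper, not the verbatim function:

    # Hedged sketch: retry <max> <delay> <command ...>
    retry() {
        local max=$1
        local delay=$2
        shift 2
        local n=1
        until "$@"; do
            if [ "$n" -ge "$max" ]; then
                echo "retry: command failed after $n attempts" >&2
                return 1
            fi
            n=$((n + 1))
            sleep "$delay"
        done
    }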
logger.go:42: 15:25:42 | demand-backup/0-deploy-operator | ++ kubectl -n kuttl-test-fancy-weevil get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 15:25:43 | demand-backup/0-deploy-operator | + MINIO_POD=minio-service-649c5b46f8-5fcm6
logger.go:42: 15:25:43 | demand-backup/0-deploy-operator | + wait_pod minio-service-649c5b46f8-5fcm6
logger.go:42: 15:25:43 | demand-backup/0-deploy-operator | + local pod=minio-service-649c5b46f8-5fcm6
logger.go:42: 15:25:43 | demand-backup/0-deploy-operator | + local ns=kuttl-test-fancy-weevil
logger.go:42: 15:25:43 | demand-backup/0-deploy-operator | + set +o xtrace
logger.go:42: 15:25:43 | demand-backup/0-deploy-operator | minio-service-649c5b46f8-5fcm6true
logger.go:42: 15:25:43 | demand-backup/0-deploy-operator | + kubectl -n kuttl-test-fancy-weevil run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID='\''some-access-key'\'' AWS_SECRET_ACCESS_KEY='\''some-secret-key'\'' AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'
logger.go:42: 15:25:47 | demand-backup/0-deploy-operator | make_bucket: operator-testing
logger.go:42: 15:25:47 | demand-backup/0-deploy-operator | All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt.
logger.go:42: 15:25:47 | demand-backup/0-deploy-operator | If you don't see a command prompt, try pressing enter.
logger.go:42: 15:25:54 | demand-backup/0-deploy-operator | pod "aws-cli" deleted from kuttl-test-fancy-weevil namespace
logger.go:42: 15:25:54 | demand-backup/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 15:25:54 | demand-backup/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 15:25:55 | demand-backup/0-deploy-operator | INFO Found 1 resource(s).
logger.go:42: 15:25:55 | demand-backup/0-deploy-operator | NAME                           NAMESPACE    COL0
logger.go:42: 15:25:55 | demand-backup/0-deploy-operator | percona-server-mysql-operator  ps-operator  1
logger.go:42: 15:25:55 | demand-backup/0-deploy-operator | ASSERT PASS
logger.go:42: 15:25:55 | demand-backup/0-deploy-operator | test step completed 0-deploy-operator
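The step's success criterion is not a plain `kubectl get`: it uses the kubectl-assert plugin (note /root/.krew/bin on the PATH earlier in this log), whose exit code kuttl turns into the step result. The same command, reformatted:

    # Fails unless a Deployment named percona-server-mysql-operator exists
    # with status.readyReplicas=1; OPERATOR_NS falls back to the test namespace.
    kubectl assert exist-enhanced deployment percona-server-mysql-operator \
        -n "${OPERATOR_NS:-$NAMESPACE}" \
        --field-selector status.readyReplicas=1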
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | starting test step 1-create-cluster
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | running command: [sh -c set -o errexit
    set -o xtrace
    source ../../functions
    get_cr | kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | + source ../../functions
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ realpath ../../..
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | ++++ pwd
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | ++ test_name=demand-backup
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/vars.sh
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export GIT_BRANCH=PR-1238
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ GIT_BRANCH=PR-1238
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export VERSION=PR-1238-7677a7b6
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ VERSION=PR-1238-7677a7b6
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1238-7677a7b6
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1238-7677a7b6
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ [[ -z 8.0 ]]
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export MYSQL_VERSION=8.0
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ MYSQL_VERSION=8.0
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export IMAGE_BINLOG_SERVER=perconalab/percona-binlog-server:0.2.1
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ IMAGE_BINLOG_SERVER=perconalab/percona-binlog-server:0.2.1
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export CERT_MANAGER_VER=1.20.2
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ CERT_MANAGER_VER=1.20.2
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export MINIO_VER=5.4.0
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ MINIO_VER=5.4.0
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export VAULT_VER=0.16.1
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ VAULT_VER=0.16.1
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | ++++ which gdate
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | ++++ which date
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ export date=/usr/sbin/date
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ date=/usr/sbin/date
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ oc get projects
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ :
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ kubectl get nodes
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ grep '^minikube'
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ which gsed
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | +++ which sed
logger.go:42: 15:25:55 | demand-backup/1-create-cluster | ++ sed=/usr/sbin/sed
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | + get_cr
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | + local name_suffix=
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | + local image_mysql=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | + local image_backup=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | + local image_orchestrator=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | + local image_router=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | + local image_toolkit=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | + local image_haproxy=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | + local image_pmm_client=perconalab/pmm-client:3-dev-latest
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | + local image_binlog_server=perconalab/percona-binlog-server:0.2.1
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | + kubectl -n kuttl-test-fancy-weevil apply -f -
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | + local cr_file=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy/cr.yaml
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | ++ detect_k8s_platform
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | ++ set +x
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | ++ echo gke
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | + local platform=gke
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | + local cr_name=demand-backup
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | + crs=('/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy/cr.yaml')
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | + local crs
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | ++ get_test_cr demand-backup
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | ++ local cr_name=demand-backup
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | +++ detect_k8s_platform
logger.go:42: 15:25:56 | demand-backup/1-create-cluster | +++ set +x
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | +++ echo gke
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | ++ local platform=gke
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | +++ get_platform_alias gke
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | +++ set +x
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | +++ echo gke
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | ++ platform_alias=gke
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | +++ get_storage_alias gke
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | +++ set +x
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | +++ echo default
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | ++ storage_alias=default
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | ++ local default_cr=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup.yaml
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | ++ local platform_cr=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup-gke.yaml
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | ++ local storage_cr=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup-default.yaml
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | ++ [[ -n /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup-default.yaml ]]
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | ++ [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup-default.yaml ]]
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | ++ [[ -n /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup-gke.yaml ]]
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | ++ [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup-gke.yaml ]]
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | ++ [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup.yaml ]]
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | ++ echo /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup.yaml
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | + local test_cr=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup.yaml
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | + [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup.yaml ]]
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | + crs+=("$test_cr")
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | + yq eval-all $'\n\t\tselect(fileIndex == 0) as $base |\n\t\tselect(fileIndex == 1) as $test |\n\t\t($base * ($test // {})) |\n\t\t.spec.backup.storages = ($test.spec.backup.storages // $base.spec.backup.storages // {}) |\n\t\t.spec.mysql.clusterType = ($test.spec.mysql.clusterType // "async") |\n\t\t.metadata.name = "demand-backup" |\n\t\t.spec.initContainer.image = "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" |\n\t\t.spec.secretsName = ($test.spec.secretsName // null) |\n\t\t.spec.sslSecretName = "test-ssl" |\n\t\t.spec.upgradeOptions.apply = "disabled" |\n\t\t.spec.mysql.gracePeriod = 30 |\n\t\t.spec.orchestrator.enabled = true |\n\t\t.spec.mysql.image = "perconalab/percona-server-mysql-operator:main-psmysql8.0" |\n\t\t.spec.backup.image = "perconalab/percona-server-mysql-operator:main-backup8.0" |\n\t\t.spec.orchestrator.image = "perconalab/percona-server-mysql-operator:main-orchestrator" |\n\t\t.spec.proxy.router.image = "perconalab/percona-server-mysql-operator:main-router8.0" |\n\t\t.spec.toolkit.image = "perconalab/percona-server-mysql-operator:main-toolkit" |\n\t\t.spec.proxy.haproxy.image = "perconalab/percona-server-mysql-operator:main-haproxy" |\n\t\t.spec.pmm.image = "perconalab/pmm-client:3-dev-latest" |\n\t\t.spec.backup.pitr.binlogServer.image="perconalab/percona-binlog-server:0.2.1" |\n\t\t(.. | select(tag == "!!str")) |= sub(""; "kuttl-test-fancy-weevil")\n\t' /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy/cr.yaml /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/demand-backup.yaml
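That ANSI-C-quoted eval-all expression is the heart of get_cr: it deep-merges the repo's default CR with the test's own CR, then pins names, secrets, and every image under test. Unescaped and abridged to its core (the full field list is in the trace above):

    # Core of get_cr: base CR * test CR (test wins), then pin test-specific fields.
    yq eval-all '
        select(fileIndex == 0) as $base |
        select(fileIndex == 1) as $test |
        ($base * ($test // {})) |
        .spec.backup.storages = ($test.spec.backup.storages // $base.spec.backup.storages // {}) |
        .metadata.name = "demand-backup" |
        .spec.sslSecretName = "test-ssl" |
        .spec.mysql.image = "perconalab/percona-server-mysql-operator:main-psmysql8.0"
    ' deploy/cr.yaml e2e-tests/tests/demand-backup/conf/demand-backup.yaml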
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | + [[ gke == minikube ]]
logger.go:42: 15:25:57 | demand-backup/1-create-cluster | + cat
logger.go:42: 15:25:59 | demand-backup/1-create-cluster | perconaservermysql.ps.percona.com/demand-backup created
logger.go:42: 15:30:15 | demand-backup/1-create-cluster | test step completed 1-create-cluster
logger.go:42: 15:30:15 | demand-backup/2-write-data | starting test step 2-write-data
logger.go:42: 15:30:15 | demand-backup/2-write-data | running command: [sh -c set -o errexit
    set -o xtrace
    source ../../functions
    run_mysql \
        "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" \
        "-h $(get_haproxy_svc $(get_cluster_name))"
    run_mysql \
        "INSERT myDB.myTable (id) VALUES (100500)" \
        "-h $(get_haproxy_svc $(get_cluster_name))"]
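run_mysql, defined in e2e-tests/functions, is the only data path the test uses: SQL is piped into the mysql client inside the long-lived mysql-client pod, addressed at the HAProxy service so statements always land on the current primary. A condensed sketch assembled from the xtrace that follows (the real helper also resolves and waits for the client pod; error handling omitted):

    # Condensed sketch of run_mysql <sql> <host-args>, per the trace below.
    run_mysql() {
        local command="$1"
        local host="$2"   # e.g. "-h demand-backup-haproxy"
        local pass
        pass=$(kubectl -n "${NAMESPACE}" get secret demand-backup-secrets \
            -o 'jsonpath={.data.root}' | base64 --decode)
        kubectl -n "${NAMESPACE}" exec mysql-client -- bash -c \
            "printf '%s\n' \"${command}\" | mysql -sN ${host} -uroot -p'${pass}'" \
            | sed -e 's/mysql: //' \
            | grep -v 'Using a password on the command line interface can be insecure.'
    }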
logger.go:42: 15:30:15 | demand-backup/2-write-data | + source ../../functions
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ realpath ../../..
logger.go:42: 15:30:15 | demand-backup/2-write-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238
logger.go:42: 15:30:15 | demand-backup/2-write-data | ++++ pwd
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup
logger.go:42: 15:30:15 | demand-backup/2-write-data | ++ test_name=demand-backup
logger.go:42: 15:30:15 | demand-backup/2-write-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/vars.sh
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup
logger.go:42: 15:30:15 | demand-backup/2-write-data | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export GIT_BRANCH=PR-1238
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ GIT_BRANCH=PR-1238
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export VERSION=PR-1238-7677a7b6
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ VERSION=PR-1238-7677a7b6
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1238-7677a7b6
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1238-7677a7b6
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ [[ -z 8.0 ]]
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export MYSQL_VERSION=8.0
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ MYSQL_VERSION=8.0
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export IMAGE_BINLOG_SERVER=perconalab/percona-binlog-server:0.2.1
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ IMAGE_BINLOG_SERVER=perconalab/percona-binlog-server:0.2.1
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export CERT_MANAGER_VER=1.20.2
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ CERT_MANAGER_VER=1.20.2
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export MINIO_VER=5.4.0
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ MINIO_VER=5.4.0
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export VAULT_VER=0.16.1
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ VAULT_VER=0.16.1
logger.go:42: 15:30:15 | demand-backup/2-write-data | ++++ which gdate
logger.go:42: 15:30:15 | demand-backup/2-write-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 15:30:15 | demand-backup/2-write-data | ++++ which date
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ export date=/usr/sbin/date
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ date=/usr/sbin/date
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ oc get projects
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ :
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ kubectl get nodes
logger.go:42: 15:30:15 | demand-backup/2-write-data | +++ grep '^minikube'
logger.go:42: 15:30:16 | demand-backup/2-write-data | +++ which gsed
logger.go:42: 15:30:16 | demand-backup/2-write-data | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 15:30:16 | demand-backup/2-write-data | +++ which sed
logger.go:42: 15:30:16 | demand-backup/2-write-data | ++ sed=/usr/sbin/sed
logger.go:42: 15:30:16 | demand-backup/2-write-data | +++ get_cluster_name
logger.go:42: 15:30:16 | demand-backup/2-write-data | +++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 15:30:16 | demand-backup/2-write-data | ++ get_haproxy_svc demand-backup
logger.go:42: 15:30:16 | demand-backup/2-write-data | ++ local cluster=demand-backup
logger.go:42: 15:30:16 | demand-backup/2-write-data | ++ echo demand-backup-haproxy
logger.go:42: 15:30:16 | demand-backup/2-write-data | + run_mysql 'CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' '-h demand-backup-haproxy'
logger.go:42: 15:30:16 | demand-backup/2-write-data | + local 'command=CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)'
logger.go:42: 15:30:16 | demand-backup/2-write-data | + local 'host=-h demand-backup-haproxy'
logger.go:42: 15:30:16 | demand-backup/2-write-data | ++ get_user_pass root
logger.go:42: 15:30:16 | demand-backup/2-write-data | ++ local user=root
logger.go:42: 15:30:16 | demand-backup/2-write-data | +++ get_cluster_name
logger.go:42: 15:30:16 | demand-backup/2-write-data | +++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 15:30:17 | demand-backup/2-write-data | ++ local secret=demand-backup-secrets
logger.go:42: 15:30:17 | demand-backup/2-write-data | ++ kubectl -n kuttl-test-fancy-weevil get secret demand-backup-secrets -o 'jsonpath={.data.root}'
logger.go:42: 15:30:17 | demand-backup/2-write-data | ++ base64 --decode
logger.go:42: 15:30:17 | demand-backup/2-write-data | + local 'user=-uroot -p'\''y&$zcdTi#nBY[lNKJk'\'''
logger.go:42: 15:30:17 | demand-backup/2-write-data | + local pod=
logger.go:42: 15:30:17 | demand-backup/2-write-data | ++ get_client_pod
logger.go:42: 15:30:17 | demand-backup/2-write-data | ++ kubectl -n kuttl-test-fancy-weevil get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 15:30:18 | demand-backup/2-write-data | + client_pod=mysql-client
logger.go:42: 15:30:18 | demand-backup/2-write-data | + wait_pod mysql-client
logger.go:42: 15:30:18 | demand-backup/2-write-data | + local pod=mysql-client
logger.go:42: 15:30:18 | demand-backup/2-write-data | + local ns=kuttl-test-fancy-weevil
logger.go:42: 15:30:18 | demand-backup/2-write-data | + set +o xtrace
logger.go:42: 15:30:19 | demand-backup/2-write-data | mysql-clienttrue
logger.go:42: 15:30:19 | demand-backup/2-write-data | + kubectl -n kuttl-test-fancy-weevil exec mysql-client -- bash -c 'printf '\''%s\n'\'' "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" | mysql -sN -h demand-backup-haproxy -uroot -p'\''y&$zcdTi#nBY[lNKJk'\'''
logger.go:42: 15:30:19 | demand-backup/2-write-data | + /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 15:30:19 | demand-backup/2-write-data | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 15:30:20 | demand-backup/2-write-data | + :
logger.go:42: 15:30:20 | demand-backup/2-write-data | +++ get_cluster_name
logger.go:42: 15:30:20 | demand-backup/2-write-data | +++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 15:30:20 | demand-backup/2-write-data | ++ get_haproxy_svc demand-backup
logger.go:42: 15:30:20 | demand-backup/2-write-data | ++ local cluster=demand-backup
logger.go:42: 15:30:20 | demand-backup/2-write-data | ++ echo demand-backup-haproxy
logger.go:42: 15:30:20 | demand-backup/2-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100500)' '-h demand-backup-haproxy'
logger.go:42: 15:30:20 | demand-backup/2-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100500)'
logger.go:42: 15:30:20 | demand-backup/2-write-data | + local 'host=-h demand-backup-haproxy'
logger.go:42: 15:30:20 | demand-backup/2-write-data | ++ get_user_pass root
logger.go:42: 15:30:20 | demand-backup/2-write-data | ++ local user=root
logger.go:42: 15:30:20 | demand-backup/2-write-data | +++ get_cluster_name
logger.go:42: 15:30:20 | demand-backup/2-write-data | +++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 15:30:21 | demand-backup/2-write-data | ++ local secret=demand-backup-secrets
logger.go:42: 15:30:21 | demand-backup/2-write-data | ++ kubectl -n kuttl-test-fancy-weevil get secret demand-backup-secrets -o 'jsonpath={.data.root}'
logger.go:42: 15:30:21 | demand-backup/2-write-data | ++ base64 --decode
logger.go:42: 15:30:21 | demand-backup/2-write-data | + local 'user=-uroot -p'\''y&$zcdTi#nBY[lNKJk'\'''
logger.go:42: 15:30:21 | demand-backup/2-write-data | + local pod=
logger.go:42: 15:30:21 | demand-backup/2-write-data | ++ get_client_pod
logger.go:42: 15:30:21 | demand-backup/2-write-data | ++ kubectl -n kuttl-test-fancy-weevil get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 15:30:22 | demand-backup/2-write-data | + client_pod=mysql-client
logger.go:42: 15:30:22 | demand-backup/2-write-data | + wait_pod mysql-client
logger.go:42: 15:30:22 | demand-backup/2-write-data | + local pod=mysql-client
logger.go:42: 15:30:22 | demand-backup/2-write-data | + local ns=kuttl-test-fancy-weevil
logger.go:42: 15:30:22 | demand-backup/2-write-data | + set +o xtrace
logger.go:42: 15:30:22 | demand-backup/2-write-data | mysql-clienttrue
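Worth pulling out of the noise above: credentials are never hardcoded. get_user_pass resolves the cluster name from the first PerconaServerMySQL object in the namespace (`ps` is the CRD's short name, as the trace shows) and reads the root password out of the operator-generated `<cluster>-secrets` Secret:

    # Standalone equivalent of the get_cluster_name + get_user_pass trace above.
    CLUSTER=$(kubectl -n "${NAMESPACE}" get ps -o 'jsonpath={.items[0].metadata.name}')
    ROOT_PASS=$(kubectl -n "${NAMESPACE}" get secret "${CLUSTER}-secrets" \
        -o 'jsonpath={.data.root}' | base64 --decode)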
kubectl -n kuttl-test-fancy-weevil exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100500)" | mysql -sN -h demand-backup-haproxy -uroot -p'\''y&$zcdTi#nBY[lNKJk'\'''
logger.go:42: 15:30:22 | demand-backup/2-write-data | + /usr/sbin/sed -e 's/mysql: //'
logger.go:42: 15:30:22 | demand-backup/2-write-data | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 15:30:23 | demand-backup/2-write-data | + :
logger.go:42: 15:30:23 | demand-backup/2-write-data | test step completed 2-write-data
logger.go:42: 15:30:23 | demand-backup/3-move-primary-before-backup | starting test step 3-move-primary-before-backup
logger.go:42: 15:30:23 | demand-backup/3-move-primary-before-backup | running command: [sh -c set -o errexit set -o xtrace source ../../functions primary_pod_from_label="$(get_primary_from_label)" kubectl delete pod -n ${NAMESPACE} ${primary_pod_from_label} wait_cluster_consistency_async "${test_name}" "3" "3" new_primary_pod_from_label="$(get_primary_from_label)" if [ "${primary_pod_from_label}" == "${new_primary_pod_from_label}" ]; then echo "Old (${primary_pod_from_label}) and new (${new_primary_pod_from_label}) primary are the same (the failover didn't happen)!" exit 1 fi]
logger.go:42: 15:30:23 | demand-backup/3-move-primary-before-backup | + source ../../functions
logger.go:42: 15:30:24 | demand-backup/3-move-primary-before-backup | ++ get_primary_from_label
logger.go:42: 15:30:24 | demand-backup/3-move-primary-before-backup | ++ kubectl -n kuttl-test-fancy-weevil get pods -l mysql.percona.com/primary=true '-ojsonpath={.items[0].metadata.name}'
logger.go:42: 15:30:24 | demand-backup/3-move-primary-before-backup | + primary_pod_from_label=demand-backup-mysql-0
logger.go:42: 15:30:24 | demand-backup/3-move-primary-before-backup | + kubectl delete pod -n kuttl-test-fancy-weevil demand-backup-mysql-0
logger.go:42: 15:30:25 | demand-backup/3-move-primary-before-backup | pod "demand-backup-mysql-0" deleted from kuttl-test-fancy-weevil namespace
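The step-3 script above (printed flattened by kuttl) resolves the primary through the mysql.percona.com/primary=true pod label, deletes that pod, waits for the cluster to converge, and asserts that a different pod took over. A condensed sketch of that failover check, reconstructed from this trace and the polling loop that follows (the real wait_cluster_consistency_async also verifies .status.mysql.ready, .status.orchestrator.ready and .status.state, as the trace below shows):

```bash
#!/bin/bash
set -o errexit

NAMESPACE=kuttl-test-fancy-weevil
CLUSTER=demand-backup

# The operator labels the current primary pod; resolve it by that label.
get_primary_from_label() {
    kubectl -n "${NAMESPACE}" get pods -l mysql.percona.com/primary=true \
        -o 'jsonpath={.items[0].metadata.name}'
}

old_primary=$(get_primary_from_label)
kubectl delete pod -n "${NAMESPACE}" "${old_primary}"

# Poll the PerconaServerMySQL resource until MySQL reports ready again
# (the trace shows an initial 7s sleep, then 15s retry intervals).
sleep 7
until [[ $(kubectl get ps "${CLUSTER}" -n "${NAMESPACE}" \
        -o 'jsonpath={.status.mysql.state}') == "ready" ]]; do
    echo "waiting for cluster readyness (async)"
    sleep 15
done

# A different pod must now carry the primary label, or failover never happened.
new_primary=$(get_primary_from_label)
if [[ "${old_primary}" == "${new_primary}" ]]; then
    echo "Old (${old_primary}) and new (${new_primary}) primary are the same (the failover didn't happen)!"
    exit 1
fi
```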
logger.go:42: 15:30:45 | demand-backup/3-move-primary-before-backup | + wait_cluster_consistency_async demand-backup 3 3 logger.go:42: 15:30:45 | demand-backup/3-move-primary-before-backup | + local cluster_name=demand-backup logger.go:42: 15:30:45 | demand-backup/3-move-primary-before-backup | + local cluster_size=3 logger.go:42: 15:30:45 | demand-backup/3-move-primary-before-backup | + local orc_size=3 logger.go:42: 15:30:45 | demand-backup/3-move-primary-before-backup | + '[' -z 3 ']' logger.go:42: 15:30:45 | demand-backup/3-move-primary-before-backup | + sleep 7 logger.go:42: 15:30:52 | demand-backup/3-move-primary-before-backup | ++ kubectl get ps demand-backup -n kuttl-test-fancy-weevil -o 'jsonpath={.status.mysql.state}' logger.go:42: 15:30:53 | demand-backup/3-move-primary-before-backup | + [[ initializing == ready ]] logger.go:42: 15:30:53 | demand-backup/3-move-primary-before-backup | + echo 'waiting for cluster readyness (async)' logger.go:42: 15:30:53 | demand-backup/3-move-primary-before-backup | waiting for cluster readyness (async) logger.go:42: 15:30:53 | demand-backup/3-move-primary-before-backup | + sleep 15 logger.go:42: 15:31:08 | demand-backup/3-move-primary-before-backup | ++ kubectl get ps demand-backup -n kuttl-test-fancy-weevil -o 'jsonpath={.status.mysql.state}' logger.go:42: 15:31:08 | demand-backup/3-move-primary-before-backup | + [[ initializing == ready ]] logger.go:42: 15:31:08 | demand-backup/3-move-primary-before-backup | + echo 'waiting for cluster readyness (async)' logger.go:42: 15:31:08 | demand-backup/3-move-primary-before-backup | waiting for cluster readyness (async) logger.go:42: 15:31:08 | demand-backup/3-move-primary-before-backup | + sleep 15 logger.go:42: 15:31:23 | demand-backup/3-move-primary-before-backup | ++ kubectl get ps demand-backup -n kuttl-test-fancy-weevil -o 'jsonpath={.status.mysql.state}' logger.go:42: 15:31:24 | demand-backup/3-move-primary-before-backup | + [[ initializing == ready ]] logger.go:42: 15:31:24 | demand-backup/3-move-primary-before-backup | + echo 'waiting for cluster readyness (async)' logger.go:42: 15:31:24 | demand-backup/3-move-primary-before-backup | waiting for cluster readyness (async) logger.go:42: 15:31:24 | demand-backup/3-move-primary-before-backup | + sleep 15 logger.go:42: 15:31:39 | demand-backup/3-move-primary-before-backup | ++ kubectl get ps demand-backup -n kuttl-test-fancy-weevil -o 'jsonpath={.status.mysql.state}' logger.go:42: 15:31:39 | demand-backup/3-move-primary-before-backup | + [[ initializing == ready ]] logger.go:42: 15:31:39 | demand-backup/3-move-primary-before-backup | + echo 'waiting for cluster readyness (async)' logger.go:42: 15:31:39 | demand-backup/3-move-primary-before-backup | waiting for cluster readyness (async) logger.go:42: 15:31:39 | demand-backup/3-move-primary-before-backup | + sleep 15 logger.go:42: 15:31:54 | demand-backup/3-move-primary-before-backup | ++ kubectl get ps demand-backup -n kuttl-test-fancy-weevil -o 'jsonpath={.status.mysql.state}' logger.go:42: 15:31:55 | demand-backup/3-move-primary-before-backup | + [[ ready == ready ]] logger.go:42: 15:31:55 | demand-backup/3-move-primary-before-backup | ++ kubectl get ps demand-backup -n kuttl-test-fancy-weevil -o 'jsonpath={.status.mysql.ready}' logger.go:42: 15:31:55 | demand-backup/3-move-primary-before-backup | + [[ 3 == 3 ]] logger.go:42: 15:31:55 | demand-backup/3-move-primary-before-backup | ++ kubectl get ps demand-backup -n kuttl-test-fancy-weevil -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 
15:31:56 | demand-backup/3-move-primary-before-backup | + [[ 3 == 3 ]]
logger.go:42: 15:31:56 | demand-backup/3-move-primary-before-backup | ++ kubectl get ps demand-backup -n kuttl-test-fancy-weevil -o 'jsonpath={.status.orchestrator.state}'
logger.go:42: 15:31:56 | demand-backup/3-move-primary-before-backup | + [[ ready == ready ]]
logger.go:42: 15:31:56 | demand-backup/3-move-primary-before-backup | ++ kubectl get ps demand-backup -n kuttl-test-fancy-weevil -o 'jsonpath={.status.state}'
logger.go:42: 15:31:57 | demand-backup/3-move-primary-before-backup | + [[ ready == ready ]]
logger.go:42: 15:31:57 | demand-backup/3-move-primary-before-backup | ++ get_primary_from_label
logger.go:42: 15:31:57 | demand-backup/3-move-primary-before-backup | ++ kubectl -n kuttl-test-fancy-weevil get pods -l mysql.percona.com/primary=true '-ojsonpath={.items[0].metadata.name}'
logger.go:42: 15:31:57 | demand-backup/3-move-primary-before-backup | + new_primary_pod_from_label=demand-backup-mysql-1
logger.go:42: 15:31:57 | demand-backup/3-move-primary-before-backup | + '[' demand-backup-mysql-0 == demand-backup-mysql-1 ']'
logger.go:42: 15:31:57 | demand-backup/3-move-primary-before-backup | test step completed 3-move-primary-before-backup
logger.go:42: 15:31:57 | demand-backup/4-create-backup | starting test step 4-create-backup
logger.go:42: 15:31:57 | demand-backup/4-create-backup | running command: [sh -c set -o errexit set -o xtrace source ../../functions # Gets first storage defined and run backup storage_name=$(kubectl get ps demand-backup -n ${NAMESPACE} -o yaml | yq '(.spec.backup.storages // {}) | keys | .[0]') run_backup demand-backup $storage_name ]
logger.go:42: 15:31:57 | demand-backup/4-create-backup | + source ../../functions
logger.go:42: 15:31:58 | demand-backup/4-create-backup | ++ kubectl get ps demand-backup -n kuttl-test-fancy-weevil -o yaml
logger.go:42: 15:31:58 | demand-backup/4-create-backup | ++ yq '(.spec.backup.storages // {}) | keys | .[0]'
logger.go:42: 15:31:59 | demand-backup/4-create-backup | + storage_name=minio
logger.go:42: 15:31:59 | demand-backup/4-create-backup | + run_backup demand-backup minio
logger.go:42: 15:31:59 | demand-backup/4-create-backup | + local backup_name=demand-backup
logger.go:42: 15:31:59 | demand-backup/4-create-backup | + local storage_name=minio
logger.go:42: 15:31:59 | demand-backup/4-create-backup | ++ get_cluster_name
logger.go:42: 15:31:59 | demand-backup/4-create-backup | ++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 15:31:59 | demand-backup/4-create-backup | + local cluster_name=demand-backup
logger.go:42: 15:31:59 | demand-backup/4-create-backup | + local prefix=
logger.go:42: 15:31:59 | demand-backup/4-create-backup | + local backup_template=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy/backup/backup.yaml
logger.go:42: 15:31:59 | demand-backup/4-create-backup | + test_backup_template=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/backup/backup.yaml
logger.go:42: 15:31:59 | demand-backup/4-create-backup | + [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/backup/backup.yaml ]]
logger.go:42: 15:31:59 | demand-backup/4-create-backup | + backup_template=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/backup/backup.yaml
logger.go:42: 15:31:59 | demand-backup/4-create-backup | + echo 'Running backup demand-backup using storage minio'
logger.go:42: 15:31:59 | demand-backup/4-create-backup | Running backup demand-backup using storage minio
logger.go:42: 15:31:59 | demand-backup/4-create-backup | + yq eval $'\n\t\t.metadata.name = "demand-backup" |\n\t\t.spec.storageName = "minio" |\n\t\t.spec.clusterName = "demand-backup"\n\t' /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/backup/backup.yaml
logger.go:42: 15:31:59 | demand-backup/4-create-backup | + kubectl apply -n kuttl-test-fancy-weevil -f -
logger.go:42: 15:32:00 | demand-backup/4-create-backup | perconaservermysqlbackup.ps.percona.com/demand-backup created
logger.go:42: 15:32:12 | demand-backup/4-create-backup | test step completed 4-create-backup
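run_backup's trace shows the whole mechanism: a PerconaServerMySQLBackup manifest is rendered from the test's backup.yaml template with yq and piped to kubectl apply. A minimal standalone equivalent (paths and names taken from the trace; the status field in the final command is an assumption, not shown in this log):

```bash
# Render the test's backup template, naming the backup and pointing it at the
# minio storage defined in the cluster spec, then create the custom resource.
yq eval '
  .metadata.name = "demand-backup" |
  .spec.storageName = "minio" |
  .spec.clusterName = "demand-backup"
' e2e-tests/tests/demand-backup/conf/backup/backup.yaml \
  | kubectl apply -n kuttl-test-fancy-weevil -f -

# Observe progress; kuttl's assert for this step waits until the backup object
# reports success (assumption: exposed under .status.state).
kubectl get perconaservermysqlbackup demand-backup -n kuttl-test-fancy-weevil \
  -o jsonpath='{.status.state}'
```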
logger.go:42: 15:32:12 | demand-backup/5-check-cmd-flags | starting test step 5-check-cmd-flags
logger.go:42: 15:32:12 | demand-backup/5-check-cmd-flags | running command: [sh -c set -o errexit set -o xtrace source ../../functions backup_pod_name=$(kubectl get pod -n ${NAMESPACE} -l app.kubernetes.io/instance=demand-backup,app.kubernetes.io/component=backup -o name) xtrabackup_flag_count=$(kubectl logs -n ${NAMESPACE} $backup_pod_name | grep -- "--strict" | wc -l) if [ "$xtrabackup_flag_count" -eq 0 ]; then echo "custom flag --strict was provided to the backup but it's not mentioned in the logs" exit 1 fi]
logger.go:42: 15:32:12 | demand-backup/5-check-cmd-flags | + source ../../functions
logger.go:42: 15:32:13 | demand-backup/5-check-cmd-flags | ++ kubectl get pod -n kuttl-test-fancy-weevil -l app.kubernetes.io/instance=demand-backup,app.kubernetes.io/component=backup -o name
logger.go:42: 15:32:14 | demand-backup/5-check-cmd-flags | + backup_pod_name=pod/xb-demand-backup-minio-72skg
logger.go:42: 15:32:14 | demand-backup/5-check-cmd-flags | ++ kubectl logs -n kuttl-test-fancy-weevil pod/xb-demand-backup-minio-72skg
logger.go:42: 15:32:14 | demand-backup/5-check-cmd-flags | ++ grep -- --strict
logger.go:42: 15:32:14 | demand-backup/5-check-cmd-flags | ++ wc -l
logger.go:42: 15:32:14 | demand-backup/5-check-cmd-flags | Defaulted container "xtrabackup" out of: xtrabackup, xtrabackup-init (init)
logger.go:42: 15:32:14 | demand-backup/5-check-cmd-flags | + xtrabackup_flag_count=1
logger.go:42: 15:32:14 | demand-backup/5-check-cmd-flags | + '[' 1 -eq 0 ']'
logger.go:42: 15:32:14 | demand-backup/5-check-cmd-flags | test step completed 5-check-cmd-flags
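The step-5 script, flattened into one line by kuttl above, is easier to audit in its original multi-line form; it asserts that the custom --strict flag passed to xtrabackup actually shows up in the backup pod's logs:

```bash
#!/bin/bash
set -o errexit

NAMESPACE=kuttl-test-fancy-weevil

# The backup job pod carries the cluster instance label plus the backup component label.
backup_pod_name=$(kubectl get pod -n "${NAMESPACE}" \
    -l app.kubernetes.io/instance=demand-backup,app.kubernetes.io/component=backup -o name)

# 'grep -- "--strict"' stops option parsing so the pattern is not read as a grep flag;
# wc -l runs last in the pipeline, so zero matches still assign 0 without tripping errexit.
xtrabackup_flag_count=$(kubectl logs -n "${NAMESPACE}" "${backup_pod_name}" \
    | grep -- "--strict" | wc -l)

if [ "${xtrabackup_flag_count}" -eq 0 ]; then
    echo "custom flag --strict was provided to the backup but it's not mentioned in the logs"
    exit 1
fi
```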
logger.go:42: 15:32:14 | demand-backup/6-check-password-leak | starting test step 6-check-password-leak
logger.go:42: 15:32:14 | demand-backup/6-check-password-leak | running command: [sh -c set -o errexit set -o xtrace source ../../functions # Temporarily skipping this check #check_passwords_leak]
logger.go:42: 15:32:14 | demand-backup/6-check-password-leak | + source ../../functions
logger.go:42: 15:32:15 | demand-backup/6-check-password-leak | test step completed 6-check-password-leak
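check_passwords_leak is commented out in this run and its implementation is not shown in this log. Purely as an illustration of what such a scan could look like (the names, flags, and approach here are assumptions, not the suite's actual helper): decode every value in the cluster secret and grep all pod logs for them.

```bash
#!/bin/bash
NAMESPACE=kuttl-test-fancy-weevil

# Decode every value stored in the cluster's secret (requires jq >= 1.6 for @base64d).
passwords=$(kubectl -n "${NAMESPACE}" get secret demand-backup-secrets -o json \
    | jq -r '.data[] | @base64d')

# Grep every pod's logs for any decoded secret value (illustrative sketch only).
for pod in $(kubectl -n "${NAMESPACE}" get pods -o name); do
    logs=$(kubectl -n "${NAMESPACE}" logs "${pod}" --all-containers 2>/dev/null || true)
    while IFS= read -r pass; do
        if [ -n "${pass}" ] && printf '%s' "${logs}" | grep -qF -- "${pass}"; then
            echo "password leak detected in ${pod} logs"
            exit 1
        fi
    done <<< "${passwords}"
done
```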
logger.go:42: 15:32:15 | demand-backup/7-delete-data | starting test step 7-delete-data
logger.go:42: 15:32:15 | demand-backup/7-delete-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_mysql \ "TRUNCATE TABLE myDB.myTable" \ "-h $(get_haproxy_svc $(get_cluster_name))" cluster_name=$(get_cluster_name) for i in 0 1 2; do data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql") kubectl create configmap -n "${NAMESPACE}" 07-delete-data-${i} --from-literal=data="${data}" done]
logger.go:42: 15:32:15 | demand-backup/7-delete-data | + source ../../functions
logger.go:42: 15:32:15 | demand-backup/7-delete-data | +++ get_cluster_name
logger.go:42: 15:32:15 | demand-backup/7-delete-data | +++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 15:32:16 | demand-backup/7-delete-data | ++ get_haproxy_svc demand-backup
logger.go:42: 15:32:16 | demand-backup/7-delete-data | ++ local cluster=demand-backup
logger.go:42: 15:32:16 | demand-backup/7-delete-data | ++ echo demand-backup-haproxy
logger.go:42: 15:32:16 | demand-backup/7-delete-data | + run_mysql 'TRUNCATE TABLE myDB.myTable' '-h demand-backup-haproxy'
logger.go:42: 15:32:16 | demand-backup/7-delete-data | + local 'command=TRUNCATE TABLE myDB.myTable'
logger.go:42: 15:32:16 | demand-backup/7-delete-data | + local 'host=-h demand-backup-haproxy'
logger.go:42: 15:32:16 | demand-backup/7-delete-data | ++ get_user_pass root
logger.go:42: 15:32:16 | demand-backup/7-delete-data | ++ local user=root
logger.go:42: 15:32:16 | demand-backup/7-delete-data | +++ get_cluster_name
logger.go:42: 15:32:16 | demand-backup/7-delete-data | +++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 15:32:17 | demand-backup/7-delete-data | ++ local secret=demand-backup-secrets
logger.go:42: 15:32:17 | demand-backup/7-delete-data | ++ base64 --decode
logger.go:42: 15:32:17 | demand-backup/7-delete-data | ++ kubectl -n kuttl-test-fancy-weevil get secret demand-backup-secrets -o 
'jsonpath={.data.root}' logger.go:42: 15:32:17 | demand-backup/7-delete-data | + local 'user=-uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:32:17 | demand-backup/7-delete-data | + local pod= logger.go:42: 15:32:17 | demand-backup/7-delete-data | ++ get_client_pod logger.go:42: 15:32:17 | demand-backup/7-delete-data | ++ kubectl -n kuttl-test-fancy-weevil get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:32:17 | demand-backup/7-delete-data | + client_pod=mysql-client logger.go:42: 15:32:17 | demand-backup/7-delete-data | + wait_pod mysql-client logger.go:42: 15:32:17 | demand-backup/7-delete-data | + local pod=mysql-client logger.go:42: 15:32:17 | demand-backup/7-delete-data | + local ns=kuttl-test-fancy-weevil logger.go:42: 15:32:17 | demand-backup/7-delete-data | + set +o xtrace logger.go:42: 15:32:18 | demand-backup/7-delete-data | mysql-clienttrue logger.go:42: 15:32:18 | demand-backup/7-delete-data | + kubectl -n kuttl-test-fancy-weevil exec mysql-client -- bash -c 'printf '\''%s\n'\'' "TRUNCATE TABLE myDB.myTable" | mysql -sN -h demand-backup-haproxy -uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:32:18 | demand-backup/7-delete-data | + /usr/sbin/sed -e 's/mysql: //' logger.go:42: 15:32:18 | demand-backup/7-delete-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 15:32:19 | demand-backup/7-delete-data | + : logger.go:42: 15:32:19 | demand-backup/7-delete-data | ++ get_cluster_name logger.go:42: 15:32:19 | demand-backup/7-delete-data | ++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:32:19 | demand-backup/7-delete-data | + cluster_name=demand-backup logger.go:42: 15:32:19 | demand-backup/7-delete-data | + for i in 0 1 2 logger.go:42: 15:32:19 | demand-backup/7-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-0.demand-backup-mysql' logger.go:42: 15:32:19 | demand-backup/7-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 15:32:19 | demand-backup/7-delete-data | ++ local 'host=-h demand-backup-mysql-0.demand-backup-mysql' logger.go:42: 15:32:19 | demand-backup/7-delete-data | +++ get_user_pass root logger.go:42: 15:32:19 | demand-backup/7-delete-data | +++ local user=root logger.go:42: 15:32:19 | demand-backup/7-delete-data | ++++ get_cluster_name logger.go:42: 15:32:19 | demand-backup/7-delete-data | ++++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:32:20 | demand-backup/7-delete-data | +++ local secret=demand-backup-secrets logger.go:42: 15:32:20 | demand-backup/7-delete-data | +++ kubectl -n kuttl-test-fancy-weevil get secret demand-backup-secrets -o 'jsonpath={.data.root}' logger.go:42: 15:32:20 | demand-backup/7-delete-data | +++ base64 --decode logger.go:42: 15:32:20 | demand-backup/7-delete-data | ++ local 'user=-uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:32:20 | demand-backup/7-delete-data | ++ local pod= logger.go:42: 15:32:20 | demand-backup/7-delete-data | +++ get_client_pod logger.go:42: 15:32:20 | demand-backup/7-delete-data | +++ kubectl -n kuttl-test-fancy-weevil get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:32:21 | demand-backup/7-delete-data | ++ client_pod=mysql-client logger.go:42: 15:32:21 | demand-backup/7-delete-data | ++ wait_pod mysql-client logger.go:42: 15:32:21 | demand-backup/7-delete-data | ++ local pod=mysql-client logger.go:42: 15:32:21 | 
demand-backup/7-delete-data | ++ local ns=kuttl-test-fancy-weevil logger.go:42: 15:32:21 | demand-backup/7-delete-data | ++ set +o xtrace logger.go:42: 15:32:21 | demand-backup/7-delete-data | mysql-clienttrue logger.go:42: 15:32:21 | demand-backup/7-delete-data | ++ kubectl -n kuttl-test-fancy-weevil exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-0.demand-backup-mysql -uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:32:21 | demand-backup/7-delete-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 15:32:21 | demand-backup/7-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 15:32:22 | demand-backup/7-delete-data | ++ : logger.go:42: 15:32:22 | demand-backup/7-delete-data | + data= logger.go:42: 15:32:22 | demand-backup/7-delete-data | + kubectl create configmap -n kuttl-test-fancy-weevil 07-delete-data-0 --from-literal=data= logger.go:42: 15:32:23 | demand-backup/7-delete-data | configmap/07-delete-data-0 created logger.go:42: 15:32:23 | demand-backup/7-delete-data | + for i in 0 1 2 logger.go:42: 15:32:23 | demand-backup/7-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-1.demand-backup-mysql' logger.go:42: 15:32:23 | demand-backup/7-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 15:32:23 | demand-backup/7-delete-data | ++ local 'host=-h demand-backup-mysql-1.demand-backup-mysql' logger.go:42: 15:32:23 | demand-backup/7-delete-data | +++ get_user_pass root logger.go:42: 15:32:23 | demand-backup/7-delete-data | +++ local user=root logger.go:42: 15:32:23 | demand-backup/7-delete-data | ++++ get_cluster_name logger.go:42: 15:32:23 | demand-backup/7-delete-data | ++++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:32:23 | demand-backup/7-delete-data | +++ local secret=demand-backup-secrets logger.go:42: 15:32:23 | demand-backup/7-delete-data | +++ kubectl -n kuttl-test-fancy-weevil get secret demand-backup-secrets -o 'jsonpath={.data.root}' logger.go:42: 15:32:23 | demand-backup/7-delete-data | +++ base64 --decode logger.go:42: 15:32:24 | demand-backup/7-delete-data | ++ local 'user=-uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:32:24 | demand-backup/7-delete-data | ++ local pod= logger.go:42: 15:32:24 | demand-backup/7-delete-data | +++ get_client_pod logger.go:42: 15:32:24 | demand-backup/7-delete-data | +++ kubectl -n kuttl-test-fancy-weevil get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:32:24 | demand-backup/7-delete-data | ++ client_pod=mysql-client logger.go:42: 15:32:24 | demand-backup/7-delete-data | ++ wait_pod mysql-client logger.go:42: 15:32:24 | demand-backup/7-delete-data | ++ local pod=mysql-client logger.go:42: 15:32:24 | demand-backup/7-delete-data | ++ local ns=kuttl-test-fancy-weevil logger.go:42: 15:32:24 | demand-backup/7-delete-data | ++ set +o xtrace logger.go:42: 15:32:24 | demand-backup/7-delete-data | mysql-clienttrue logger.go:42: 15:32:24 | demand-backup/7-delete-data | ++ kubectl -n kuttl-test-fancy-weevil exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-1.demand-backup-mysql -uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:32:24 | demand-backup/7-delete-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 15:32:24 | demand-backup/7-delete-data | ++ grep -v 'Using a password on the command line interface 
can be insecure.' logger.go:42: 15:32:25 | demand-backup/7-delete-data | ++ : logger.go:42: 15:32:25 | demand-backup/7-delete-data | + data= logger.go:42: 15:32:25 | demand-backup/7-delete-data | + kubectl create configmap -n kuttl-test-fancy-weevil 07-delete-data-1 --from-literal=data= logger.go:42: 15:32:26 | demand-backup/7-delete-data | configmap/07-delete-data-1 created logger.go:42: 15:32:26 | demand-backup/7-delete-data | + for i in 0 1 2 logger.go:42: 15:32:26 | demand-backup/7-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-2.demand-backup-mysql' logger.go:42: 15:32:26 | demand-backup/7-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 15:32:26 | demand-backup/7-delete-data | ++ local 'host=-h demand-backup-mysql-2.demand-backup-mysql' logger.go:42: 15:32:26 | demand-backup/7-delete-data | +++ get_user_pass root logger.go:42: 15:32:26 | demand-backup/7-delete-data | +++ local user=root logger.go:42: 15:32:26 | demand-backup/7-delete-data | ++++ get_cluster_name logger.go:42: 15:32:26 | demand-backup/7-delete-data | ++++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:32:26 | demand-backup/7-delete-data | +++ local secret=demand-backup-secrets logger.go:42: 15:32:26 | demand-backup/7-delete-data | +++ kubectl -n kuttl-test-fancy-weevil get secret demand-backup-secrets -o 'jsonpath={.data.root}' logger.go:42: 15:32:26 | demand-backup/7-delete-data | +++ base64 --decode logger.go:42: 15:32:27 | demand-backup/7-delete-data | ++ local 'user=-uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:32:27 | demand-backup/7-delete-data | ++ local pod= logger.go:42: 15:32:27 | demand-backup/7-delete-data | +++ get_client_pod logger.go:42: 15:32:27 | demand-backup/7-delete-data | +++ kubectl -n kuttl-test-fancy-weevil get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:32:27 | demand-backup/7-delete-data | ++ client_pod=mysql-client logger.go:42: 15:32:27 | demand-backup/7-delete-data | ++ wait_pod mysql-client logger.go:42: 15:32:27 | demand-backup/7-delete-data | ++ local pod=mysql-client logger.go:42: 15:32:27 | demand-backup/7-delete-data | ++ local ns=kuttl-test-fancy-weevil logger.go:42: 15:32:27 | demand-backup/7-delete-data | ++ set +o xtrace logger.go:42: 15:32:28 | demand-backup/7-delete-data | mysql-clienttrue logger.go:42: 15:32:28 | demand-backup/7-delete-data | ++ kubectl -n kuttl-test-fancy-weevil exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-2.demand-backup-mysql -uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:32:28 | demand-backup/7-delete-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 15:32:28 | demand-backup/7-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
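
Every query in this step funnels through the suite's run_mysql helper. Its body is never printed, but the xtrace output pins down its shape; the following is a minimal reconstruction from the traced commands (treat the function body as an assumption — only the individual commands appear in the log):

# run one SQL statement through the long-lived mysql-client pod;
# -sN suppresses column headers and table formatting
run_mysql() {
    local command="$1"    # e.g. "SELECT * FROM myDB.myTable"
    local host="$2"       # e.g. "-h demand-backup-mysql-0.demand-backup-mysql"
    local user="-uroot -p'$(get_user_pass root)'"
    local pod=            # mirrors the '+ local pod=' line in the trace
    client_pod=$(get_client_pod)
    wait_pod "${client_pod}"
    # sed strips the client banner prefix, grep drops the password warning;
    # '|| :' keeps errexit from killing the step when a statement returns no
    # rows (grep -v then filters every line and exits non-zero) — this is
    # what the bare '+ :' lines in the trace above correspond to
    kubectl -n "${NAMESPACE}" exec "${client_pod}" -- \
        bash -c "printf '%s\n' \"${command}\" | mysql -sN ${host} ${user}" \
        | sed -e 's/mysql: //' \
        | grep -v 'Using a password on the command line interface can be insecure.' \
        || :
}

The odd "mysql-clienttrue" lines appear to be wait_pod (run with xtrace switched off) printing the pod name next to its readiness value.
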
logger.go:42: 15:32:29 | demand-backup/7-delete-data | ++ : logger.go:42: 15:32:29 | demand-backup/7-delete-data | + data= logger.go:42: 15:32:29 | demand-backup/7-delete-data | + kubectl create configmap -n kuttl-test-fancy-weevil 07-delete-data-2 --from-literal=data= logger.go:42: 15:32:29 | demand-backup/7-delete-data | configmap/07-delete-data-2 created logger.go:42: 15:32:30 | demand-backup/7-delete-data | test step completed 7-delete-data logger.go:42: 15:32:30 | demand-backup/8-restore | starting test step 8-restore logger.go:42: 15:32:31 | demand-backup/8-restore | PerconaServerMySQLRestore:kuttl-test-fancy-weevil/restore-of-demand-backup created logger.go:42: 15:36:49 | demand-backup/8-restore | test step completed 8-restore logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | starting test step 9-check-cmd-flags logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | running command: [sh -c set -o errexit set -o xtrace source ../../functions restore_pod_name=$(kubectl get pod -n ${NAMESPACE} -l app.kubernetes.io/instance=restore-of-demand-backup,app.kubernetes.io/component=restore -o name) xbstream_flag_count=$(kubectl logs -n ${NAMESPACE} $restore_pod_name | grep -- "--verbose" | wc -l) if [ "$xbstream_flag_count" -eq 0 ]; then echo "custom flag --verbose was provided to the restore but it's not mentioned in the logs" exit 1 fi xtrabackup_flag_count=$(kubectl logs -n ${NAMESPACE} $restore_pod_name | grep -- "--strict" | wc -l) if [ "$xtrabackup_flag_count" -eq 0 ]; then echo "custom flag --strict was provided to the restore but it's not mentioned in the logs" exit 1 fi] logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | + source ../../functions logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ realpath ../../.. 
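
With step 7 closed out above, it is worth spelling out the delete-and-verify convention it follows (step 11 later repeats it, and the trace shows exactly these commands): truncate once through the HAProxy write endpoint, then read every replica directly and publish each (empty) result as a ConfigMap for the step's assert to compare against.

cluster_name=$(get_cluster_name)

# delete through the write endpoint so the change replicates to all members
run_mysql "TRUNCATE TABLE myDB.myTable" \
    "-h $(get_haproxy_svc "${cluster_name}")"

# query each mysql pod directly; an empty result proves the replica saw
# the truncation, and the ConfigMap records it for the kuttl assert
for i in 0 1 2; do
    data=$(run_mysql "SELECT * FROM myDB.myTable" \
        "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql")
    kubectl create configmap -n "${NAMESPACE}" "07-delete-data-${i}" \
        --from-literal=data="${data}"
done
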
logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | ++++ pwd logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | ++ test_name=demand-backup logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/vars.sh logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export GIT_BRANCH=PR-1238 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ GIT_BRANCH=PR-1238 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export VERSION=PR-1238-7677a7b6 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ VERSION=PR-1238-7677a7b6 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1238-7677a7b6 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1238-7677a7b6 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ [[ -z 8.0 ]] logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export MYSQL_VERSION=8.0 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ MYSQL_VERSION=8.0 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0 
logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export IMAGE_BINLOG_SERVER=perconalab/percona-binlog-server:0.2.1 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ IMAGE_BINLOG_SERVER=perconalab/percona-binlog-server:0.2.1 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export CERT_MANAGER_VER=1.20.2 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ CERT_MANAGER_VER=1.20.2 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export MINIO_VER=5.4.0 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ MINIO_VER=5.4.0 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export VAULT_VER=0.16.1 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ VAULT_VER=0.16.1 logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | ++++ which gdate logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | ++++ which date logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ export date=/usr/sbin/date logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ date=/usr/sbin/date logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ oc get projects logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ : logger.go:42: 15:36:49 | 
demand-backup/9-check-cmd-flags | +++ kubectl get nodes logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ grep '^minikube' logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ which gsed logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | +++ which sed logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | ++ sed=/usr/sbin/sed logger.go:42: 15:36:49 | demand-backup/9-check-cmd-flags | ++ kubectl get pod -n kuttl-test-fancy-weevil -l app.kubernetes.io/instance=restore-of-demand-backup,app.kubernetes.io/component=restore -o name logger.go:42: 15:36:50 | demand-backup/9-check-cmd-flags | + restore_pod_name=pod/xb-restore-restore-of-demand-backup-5lrkc logger.go:42: 15:36:50 | demand-backup/9-check-cmd-flags | ++ kubectl logs -n kuttl-test-fancy-weevil pod/xb-restore-restore-of-demand-backup-5lrkc logger.go:42: 15:36:50 | demand-backup/9-check-cmd-flags | ++ grep -- --verbose logger.go:42: 15:36:50 | demand-backup/9-check-cmd-flags | ++ wc -l logger.go:42: 15:36:51 | demand-backup/9-check-cmd-flags | Defaulted container "xtrabackup" out of: xtrabackup, xtrabackup-init (init) logger.go:42: 15:36:51 | demand-backup/9-check-cmd-flags | + xbstream_flag_count=1 logger.go:42: 15:36:51 | demand-backup/9-check-cmd-flags | + '[' 1 -eq 0 ']' logger.go:42: 15:36:51 | demand-backup/9-check-cmd-flags | ++ kubectl logs -n kuttl-test-fancy-weevil pod/xb-restore-restore-of-demand-backup-5lrkc logger.go:42: 15:36:51 | demand-backup/9-check-cmd-flags | ++ grep -- --strict logger.go:42: 15:36:51 | demand-backup/9-check-cmd-flags | ++ wc -l logger.go:42: 15:36:51 | demand-backup/9-check-cmd-flags | Defaulted container "xtrabackup" out of: xtrabackup, xtrabackup-init (init) logger.go:42: 15:36:52 | demand-backup/9-check-cmd-flags | + xtrabackup_flag_count=6 logger.go:42: 15:36:52 | demand-backup/9-check-cmd-flags | + '[' 6 -eq 0 ']' logger.go:42: 15:36:52 | demand-backup/9-check-cmd-flags | test step completed 9-check-cmd-flags logger.go:42: 15:36:52 | demand-backup/10-read-data | starting test step 10-read-data logger.go:42: 15:36:52 | demand-backup/10-read-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions cluster_name=$(get_cluster_name) for i in 0 1 2; do data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql") kubectl create configmap -n "${NAMESPACE}" 10-read-data-${i} --from-literal=data="${data}" done] logger.go:42: 15:36:52 | demand-backup/10-read-data | + source ../../functions logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ realpath ../../.. 
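
Two details make the step-9 flag check above robust. The "--" in grep -- --verbose ends option parsing, so a search pattern that itself begins with dashes is not parsed as a grep flag. And kubectl's 'Defaulted container "xtrabackup" ...' notice is written to stderr, so it never enters the pipeline and cannot inflate the counts. The check as the step script runs it, with one extra note:

restore_pod_name=$(kubectl get pod -n "${NAMESPACE}" \
    -l app.kubernetes.io/instance=restore-of-demand-backup,app.kubernetes.io/component=restore \
    -o name)

# 'grep ... | wc -l' rather than 'grep -c': grep exits non-zero when it
# matches nothing, which would trip errexit before the friendlier error
# message below could fire; wc -l as the last pipeline stage masks that
xbstream_flag_count=$(kubectl logs -n "${NAMESPACE}" "${restore_pod_name}" \
    | grep -- --verbose | wc -l)
if [ "${xbstream_flag_count}" -eq 0 ]; then
    echo "custom flag --verbose was provided to the restore but it's not mentioned in the logs"
    exit 1
fi
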
logger.go:42: 15:36:52 | demand-backup/10-read-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:36:52 | demand-backup/10-read-data | ++++ pwd logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup logger.go:42: 15:36:52 | demand-backup/10-read-data | ++ test_name=demand-backup logger.go:42: 15:36:52 | demand-backup/10-read-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/vars.sh logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 15:36:52 | demand-backup/10-read-data | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export GIT_BRANCH=PR-1238 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ GIT_BRANCH=PR-1238 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export VERSION=PR-1238-7677a7b6 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ VERSION=PR-1238-7677a7b6 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1238-7677a7b6 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1238-7677a7b6 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ [[ -z 8.0 ]] logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export MYSQL_VERSION=8.0 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ MYSQL_VERSION=8.0 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0 logger.go:42: 15:36:52 | 
demand-backup/10-read-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export IMAGE_BINLOG_SERVER=perconalab/percona-binlog-server:0.2.1 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ IMAGE_BINLOG_SERVER=perconalab/percona-binlog-server:0.2.1 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export CERT_MANAGER_VER=1.20.2 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ CERT_MANAGER_VER=1.20.2 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export MINIO_VER=5.4.0 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ MINIO_VER=5.4.0 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export VAULT_VER=0.16.1 logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ VAULT_VER=0.16.1 logger.go:42: 15:36:52 | demand-backup/10-read-data | ++++ which gdate logger.go:42: 15:36:52 | demand-backup/10-read-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 15:36:52 | demand-backup/10-read-data | ++++ which date logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ export date=/usr/sbin/date logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ date=/usr/sbin/date logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ oc get projects logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ : logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ kubectl get nodes logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ grep '^minikube' logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ which gsed logger.go:42: 15:36:52 | demand-backup/10-read-data | which: no gsed in 
(/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 15:36:52 | demand-backup/10-read-data | +++ which sed logger.go:42: 15:36:52 | demand-backup/10-read-data | ++ sed=/usr/sbin/sed logger.go:42: 15:36:52 | demand-backup/10-read-data | ++ get_cluster_name logger.go:42: 15:36:52 | demand-backup/10-read-data | ++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:36:53 | demand-backup/10-read-data | + cluster_name=demand-backup logger.go:42: 15:36:53 | demand-backup/10-read-data | + for i in 0 1 2 logger.go:42: 15:36:53 | demand-backup/10-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-0.demand-backup-mysql' logger.go:42: 15:36:53 | demand-backup/10-read-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 15:36:53 | demand-backup/10-read-data | ++ local 'host=-h demand-backup-mysql-0.demand-backup-mysql' logger.go:42: 15:36:53 | demand-backup/10-read-data | +++ get_user_pass root logger.go:42: 15:36:53 | demand-backup/10-read-data | +++ local user=root logger.go:42: 15:36:53 | demand-backup/10-read-data | ++++ get_cluster_name logger.go:42: 15:36:53 | demand-backup/10-read-data | ++++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:36:53 | demand-backup/10-read-data | +++ local secret=demand-backup-secrets logger.go:42: 15:36:53 | demand-backup/10-read-data | +++ kubectl -n kuttl-test-fancy-weevil get secret demand-backup-secrets -o 'jsonpath={.data.root}' logger.go:42: 15:36:53 | demand-backup/10-read-data | +++ base64 --decode logger.go:42: 15:36:54 | demand-backup/10-read-data | ++ local 'user=-uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:36:54 | demand-backup/10-read-data | ++ local pod= logger.go:42: 15:36:54 | demand-backup/10-read-data | +++ get_client_pod logger.go:42: 15:36:54 | demand-backup/10-read-data | +++ kubectl -n kuttl-test-fancy-weevil get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:36:54 | demand-backup/10-read-data | ++ client_pod=mysql-client logger.go:42: 15:36:54 | demand-backup/10-read-data | ++ wait_pod mysql-client logger.go:42: 15:36:54 | demand-backup/10-read-data | ++ local pod=mysql-client logger.go:42: 15:36:54 | demand-backup/10-read-data | ++ local ns=kuttl-test-fancy-weevil logger.go:42: 15:36:54 | demand-backup/10-read-data | ++ set +o xtrace logger.go:42: 15:36:55 | demand-backup/10-read-data | mysql-clienttrue logger.go:42: 15:36:55 | demand-backup/10-read-data | ++ kubectl -n kuttl-test-fancy-weevil exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-0.demand-backup-mysql -uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:36:55 | demand-backup/10-read-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 15:36:55 | demand-backup/10-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
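
The three- and four-deep "+++/++++" blocks that precede every query above are the credential lookup: resolve the cluster name from the first ps resource in the namespace, read the root key of the <cluster>-secrets Secret, and base64-decode it. Reassembled from the traced commands (the function bodies are assumptions; the individual commands are not):

# the first ps custom resource in the namespace names the cluster
get_cluster_name() {
    kubectl -n "${NAMESPACE}" get ps -o 'jsonpath={.items[0].metadata.name}'
}

# decode one user's password out of the cluster's secrets object
get_user_pass() {
    local user="$1"                               # e.g. root
    local secret="$(get_cluster_name)-secrets"    # e.g. demand-backup-secrets
    kubectl -n "${NAMESPACE}" get secret "${secret}" \
        -o "jsonpath={.data.${user}}" | base64 --decode
}
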
logger.go:42: 15:36:56 | demand-backup/10-read-data | + data=100500 logger.go:42: 15:36:56 | demand-backup/10-read-data | + kubectl create configmap -n kuttl-test-fancy-weevil 10-read-data-0 --from-literal=data=100500 logger.go:42: 15:36:56 | demand-backup/10-read-data | configmap/10-read-data-0 created logger.go:42: 15:36:56 | demand-backup/10-read-data | + for i in 0 1 2 logger.go:42: 15:36:56 | demand-backup/10-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-1.demand-backup-mysql' logger.go:42: 15:36:56 | demand-backup/10-read-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 15:36:56 | demand-backup/10-read-data | ++ local 'host=-h demand-backup-mysql-1.demand-backup-mysql' logger.go:42: 15:36:56 | demand-backup/10-read-data | +++ get_user_pass root logger.go:42: 15:36:56 | demand-backup/10-read-data | +++ local user=root logger.go:42: 15:36:56 | demand-backup/10-read-data | ++++ get_cluster_name logger.go:42: 15:36:56 | demand-backup/10-read-data | ++++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:36:56 | demand-backup/10-read-data | +++ local secret=demand-backup-secrets logger.go:42: 15:36:56 | demand-backup/10-read-data | +++ kubectl -n kuttl-test-fancy-weevil get secret demand-backup-secrets -o 'jsonpath={.data.root}' logger.go:42: 15:36:56 | demand-backup/10-read-data | +++ base64 --decode logger.go:42: 15:36:57 | demand-backup/10-read-data | ++ local 'user=-uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:36:57 | demand-backup/10-read-data | ++ local pod= logger.go:42: 15:36:57 | demand-backup/10-read-data | +++ get_client_pod logger.go:42: 15:36:57 | demand-backup/10-read-data | +++ kubectl -n kuttl-test-fancy-weevil get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:36:57 | demand-backup/10-read-data | ++ client_pod=mysql-client logger.go:42: 15:36:57 | demand-backup/10-read-data | ++ wait_pod mysql-client logger.go:42: 15:36:57 | demand-backup/10-read-data | ++ local pod=mysql-client logger.go:42: 15:36:57 | demand-backup/10-read-data | ++ local ns=kuttl-test-fancy-weevil logger.go:42: 15:36:57 | demand-backup/10-read-data | ++ set +o xtrace logger.go:42: 15:36:58 | demand-backup/10-read-data | mysql-clienttrue logger.go:42: 15:36:58 | demand-backup/10-read-data | ++ kubectl -n kuttl-test-fancy-weevil exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-1.demand-backup-mysql -uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:36:58 | demand-backup/10-read-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 15:36:58 | demand-backup/10-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 15:36:59 | demand-backup/10-read-data | + data=100500 logger.go:42: 15:36:59 | demand-backup/10-read-data | + kubectl create configmap -n kuttl-test-fancy-weevil 10-read-data-1 --from-literal=data=100500 logger.go:42: 15:36:59 | demand-backup/10-read-data | configmap/10-read-data-1 created logger.go:42: 15:36:59 | demand-backup/10-read-data | + for i in 0 1 2 logger.go:42: 15:36:59 | demand-backup/10-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-2.demand-backup-mysql' logger.go:42: 15:36:59 | demand-backup/10-read-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 15:36:59 | demand-backup/10-read-data | ++ local 'host=-h demand-backup-mysql-2.demand-backup-mysql' logger.go:42: 15:36:59 | demand-backup/10-read-data | +++ get_user_pass root logger.go:42: 15:36:59 | demand-backup/10-read-data | +++ local user=root logger.go:42: 15:36:59 | demand-backup/10-read-data | ++++ get_cluster_name logger.go:42: 15:36:59 | demand-backup/10-read-data | ++++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:37:00 | demand-backup/10-read-data | +++ local secret=demand-backup-secrets logger.go:42: 15:37:00 | demand-backup/10-read-data | +++ kubectl -n kuttl-test-fancy-weevil get secret demand-backup-secrets -o 'jsonpath={.data.root}' logger.go:42: 15:37:00 | demand-backup/10-read-data | +++ base64 --decode logger.go:42: 15:37:00 | demand-backup/10-read-data | ++ local 'user=-uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:37:00 | demand-backup/10-read-data | ++ local pod= logger.go:42: 15:37:00 | demand-backup/10-read-data | +++ get_client_pod logger.go:42: 15:37:00 | demand-backup/10-read-data | +++ kubectl -n kuttl-test-fancy-weevil get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:37:01 | demand-backup/10-read-data | ++ client_pod=mysql-client logger.go:42: 15:37:01 | demand-backup/10-read-data | ++ wait_pod mysql-client logger.go:42: 15:37:01 | demand-backup/10-read-data | ++ local pod=mysql-client logger.go:42: 15:37:01 | demand-backup/10-read-data | ++ local ns=kuttl-test-fancy-weevil logger.go:42: 15:37:01 | demand-backup/10-read-data | ++ set +o xtrace logger.go:42: 15:37:01 | demand-backup/10-read-data | mysql-clienttrue logger.go:42: 15:37:01 | demand-backup/10-read-data | ++ kubectl -n kuttl-test-fancy-weevil exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-2.demand-backup-mysql -uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:37:01 | demand-backup/10-read-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 15:37:01 | demand-backup/10-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
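
The 10-read-data-N ConfigMaps created in this step are not consumed by the script itself; they exist so kuttl can match them against the step's assert file. That file is not reproduced in this log, but given the recorded value it would plausibly contain entries like the following (path and exact contents are assumptions):

# e2e-tests/tests/demand-backup/10-assert.yaml (hypothetical path)
apiVersion: v1
kind: ConfigMap
metadata:
  name: 10-read-data-0
data:
  data: "100500"

kuttl treats such a file as a set of expected objects: the step passes only once an object with the same kind and name exists in the test namespace and its listed fields match.
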
logger.go:42: 15:37:02 | demand-backup/10-read-data | + data=100500 logger.go:42: 15:37:02 | demand-backup/10-read-data | + kubectl create configmap -n kuttl-test-fancy-weevil 10-read-data-2 --from-literal=data=100500 logger.go:42: 15:37:02 | demand-backup/10-read-data | configmap/10-read-data-2 created logger.go:42: 15:37:03 | demand-backup/10-read-data | test step completed 10-read-data logger.go:42: 15:37:03 | demand-backup/11-delete-data | starting test step 11-delete-data logger.go:42: 15:37:03 | demand-backup/11-delete-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_mysql \ "TRUNCATE TABLE myDB.myTable" \ "-h $(get_haproxy_svc $(get_cluster_name))" cluster_name=$(get_cluster_name) for i in 0 1 2; do data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql") kubectl create configmap -n "${NAMESPACE}" 12-delete-data-backup-source-${i} --from-literal=data="${data}" done] logger.go:42: 15:37:03 | demand-backup/11-delete-data | + source ../../functions logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ realpath ../../.. logger.go:42: 15:37:03 | demand-backup/11-delete-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:37:03 | demand-backup/11-delete-data | ++++ pwd logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup logger.go:42: 15:37:03 | demand-backup/11-delete-data | ++ test_name=demand-backup logger.go:42: 15:37:03 | demand-backup/11-delete-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/vars.sh logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 15:37:03 | demand-backup/11-delete-data | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export GIT_BRANCH=PR-1238 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ GIT_BRANCH=PR-1238 logger.go:42: 15:37:03 | 
demand-backup/11-delete-data | +++ export VERSION=PR-1238-7677a7b6 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ VERSION=PR-1238-7677a7b6 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1238-7677a7b6 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1238-7677a7b6 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ [[ -z 8.0 ]] logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export MYSQL_VERSION=8.0 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ MYSQL_VERSION=8.0 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export IMAGE_BINLOG_SERVER=perconalab/percona-binlog-server:0.2.1 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ IMAGE_BINLOG_SERVER=perconalab/percona-binlog-server:0.2.1 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export CERT_MANAGER_VER=1.20.2 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ CERT_MANAGER_VER=1.20.2 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export MINIO_VER=5.4.0 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ MINIO_VER=5.4.0 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export CHAOS_MESH_VER=2.7.2 
logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export VAULT_VER=0.16.1 logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ VAULT_VER=0.16.1 logger.go:42: 15:37:03 | demand-backup/11-delete-data | ++++ which gdate logger.go:42: 15:37:03 | demand-backup/11-delete-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 15:37:03 | demand-backup/11-delete-data | ++++ which date logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ export date=/usr/sbin/date logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ date=/usr/sbin/date logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ oc get projects logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ : logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ grep '^minikube' logger.go:42: 15:37:03 | demand-backup/11-delete-data | +++ kubectl get nodes logger.go:42: 15:37:04 | demand-backup/11-delete-data | +++ which gsed logger.go:42: 15:37:04 | demand-backup/11-delete-data | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 15:37:04 | demand-backup/11-delete-data | +++ which sed logger.go:42: 15:37:04 | demand-backup/11-delete-data | ++ sed=/usr/sbin/sed logger.go:42: 15:37:04 | demand-backup/11-delete-data | +++ get_cluster_name logger.go:42: 15:37:04 | demand-backup/11-delete-data | +++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:37:05 | demand-backup/11-delete-data | ++ get_haproxy_svc demand-backup logger.go:42: 15:37:05 | demand-backup/11-delete-data | ++ local cluster=demand-backup logger.go:42: 15:37:05 | demand-backup/11-delete-data | ++ echo demand-backup-haproxy logger.go:42: 15:37:05 | demand-backup/11-delete-data | + run_mysql 'TRUNCATE TABLE myDB.myTable' '-h demand-backup-haproxy' logger.go:42: 15:37:05 | demand-backup/11-delete-data | + local 'command=TRUNCATE TABLE myDB.myTable' logger.go:42: 15:37:05 | demand-backup/11-delete-data | + local 'host=-h demand-backup-haproxy' logger.go:42: 15:37:05 | demand-backup/11-delete-data | ++ get_user_pass root logger.go:42: 15:37:05 | demand-backup/11-delete-data | ++ local user=root logger.go:42: 15:37:05 | demand-backup/11-delete-data | +++ get_cluster_name logger.go:42: 15:37:05 | demand-backup/11-delete-data | +++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:37:05 | demand-backup/11-delete-data | ++ local secret=demand-backup-secrets logger.go:42: 15:37:05 | demand-backup/11-delete-data | ++ kubectl -n kuttl-test-fancy-weevil get secret demand-backup-secrets -o 'jsonpath={.data.root}' logger.go:42: 15:37:05 | demand-backup/11-delete-data | ++ base64 --decode logger.go:42: 15:37:06 | demand-backup/11-delete-data | + local 'user=-uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:37:06 | demand-backup/11-delete-data | + local pod= logger.go:42: 15:37:06 | demand-backup/11-delete-data | ++ get_client_pod logger.go:42: 15:37:06 | demand-backup/11-delete-data | ++ kubectl -n kuttl-test-fancy-weevil get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:37:06 | demand-backup/11-delete-data | + client_pod=mysql-client logger.go:42: 15:37:06 | 
demand-backup/11-delete-data | + wait_pod mysql-client logger.go:42: 15:37:06 | demand-backup/11-delete-data | + local pod=mysql-client logger.go:42: 15:37:06 | demand-backup/11-delete-data | + local ns=kuttl-test-fancy-weevil logger.go:42: 15:37:06 | demand-backup/11-delete-data | + set +o xtrace logger.go:42: 15:37:06 | demand-backup/11-delete-data | mysql-clienttrue logger.go:42: 15:37:06 | demand-backup/11-delete-data | + kubectl -n kuttl-test-fancy-weevil exec mysql-client -- bash -c 'printf '\''%s\n'\'' "TRUNCATE TABLE myDB.myTable" | mysql -sN -h demand-backup-haproxy -uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:37:06 | demand-backup/11-delete-data | + /usr/sbin/sed -e 's/mysql: //' logger.go:42: 15:37:06 | demand-backup/11-delete-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 15:37:07 | demand-backup/11-delete-data | + : logger.go:42: 15:37:07 | demand-backup/11-delete-data | ++ get_cluster_name logger.go:42: 15:37:07 | demand-backup/11-delete-data | ++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:37:08 | demand-backup/11-delete-data | + cluster_name=demand-backup logger.go:42: 15:37:08 | demand-backup/11-delete-data | + for i in 0 1 2 logger.go:42: 15:37:08 | demand-backup/11-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-0.demand-backup-mysql' logger.go:42: 15:37:08 | demand-backup/11-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 15:37:08 | demand-backup/11-delete-data | ++ local 'host=-h demand-backup-mysql-0.demand-backup-mysql' logger.go:42: 15:37:08 | demand-backup/11-delete-data | +++ get_user_pass root logger.go:42: 15:37:08 | demand-backup/11-delete-data | +++ local user=root logger.go:42: 15:37:08 | demand-backup/11-delete-data | ++++ get_cluster_name logger.go:42: 15:37:08 | demand-backup/11-delete-data | ++++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:37:08 | demand-backup/11-delete-data | +++ local secret=demand-backup-secrets logger.go:42: 15:37:08 | demand-backup/11-delete-data | +++ kubectl -n kuttl-test-fancy-weevil get secret demand-backup-secrets -o 'jsonpath={.data.root}' logger.go:42: 15:37:08 | demand-backup/11-delete-data | +++ base64 --decode logger.go:42: 15:37:09 | demand-backup/11-delete-data | ++ local 'user=-uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:37:09 | demand-backup/11-delete-data | ++ local pod= logger.go:42: 15:37:09 | demand-backup/11-delete-data | +++ get_client_pod logger.go:42: 15:37:09 | demand-backup/11-delete-data | +++ kubectl -n kuttl-test-fancy-weevil get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:37:09 | demand-backup/11-delete-data | ++ client_pod=mysql-client logger.go:42: 15:37:09 | demand-backup/11-delete-data | ++ wait_pod mysql-client logger.go:42: 15:37:09 | demand-backup/11-delete-data | ++ local pod=mysql-client logger.go:42: 15:37:09 | demand-backup/11-delete-data | ++ local ns=kuttl-test-fancy-weevil logger.go:42: 15:37:09 | demand-backup/11-delete-data | ++ set +o xtrace logger.go:42: 15:37:10 | demand-backup/11-delete-data | mysql-clienttrue logger.go:42: 15:37:10 | demand-backup/11-delete-data | ++ kubectl -n kuttl-test-fancy-weevil exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-0.demand-backup-mysql -uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:37:10 | 
demand-backup/11-delete-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 15:37:10 | demand-backup/11-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 15:37:11 | demand-backup/11-delete-data | ++ : logger.go:42: 15:37:11 | demand-backup/11-delete-data | + data= logger.go:42: 15:37:11 | demand-backup/11-delete-data | + kubectl create configmap -n kuttl-test-fancy-weevil 12-delete-data-backup-source-0 --from-literal=data= logger.go:42: 15:37:11 | demand-backup/11-delete-data | configmap/12-delete-data-backup-source-0 created logger.go:42: 15:37:11 | demand-backup/11-delete-data | + for i in 0 1 2 logger.go:42: 15:37:11 | demand-backup/11-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-1.demand-backup-mysql' logger.go:42: 15:37:11 | demand-backup/11-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 15:37:11 | demand-backup/11-delete-data | ++ local 'host=-h demand-backup-mysql-1.demand-backup-mysql' logger.go:42: 15:37:11 | demand-backup/11-delete-data | +++ get_user_pass root logger.go:42: 15:37:11 | demand-backup/11-delete-data | +++ local user=root logger.go:42: 15:37:11 | demand-backup/11-delete-data | ++++ get_cluster_name logger.go:42: 15:37:11 | demand-backup/11-delete-data | ++++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:37:12 | demand-backup/11-delete-data | +++ local secret=demand-backup-secrets logger.go:42: 15:37:12 | demand-backup/11-delete-data | +++ kubectl -n kuttl-test-fancy-weevil get secret demand-backup-secrets -o 'jsonpath={.data.root}' logger.go:42: 15:37:12 | demand-backup/11-delete-data | +++ base64 --decode logger.go:42: 15:37:12 | demand-backup/11-delete-data | ++ local 'user=-uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:37:12 | demand-backup/11-delete-data | ++ local pod= logger.go:42: 15:37:12 | demand-backup/11-delete-data | +++ get_client_pod logger.go:42: 15:37:12 | demand-backup/11-delete-data | +++ kubectl -n kuttl-test-fancy-weevil get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:37:13 | demand-backup/11-delete-data | ++ client_pod=mysql-client logger.go:42: 15:37:13 | demand-backup/11-delete-data | ++ wait_pod mysql-client logger.go:42: 15:37:13 | demand-backup/11-delete-data | ++ local pod=mysql-client logger.go:42: 15:37:13 | demand-backup/11-delete-data | ++ local ns=kuttl-test-fancy-weevil logger.go:42: 15:37:13 | demand-backup/11-delete-data | ++ set +o xtrace logger.go:42: 15:37:13 | demand-backup/11-delete-data | mysql-clienttrue logger.go:42: 15:37:13 | demand-backup/11-delete-data | ++ kubectl -n kuttl-test-fancy-weevil exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-1.demand-backup-mysql -uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:37:13 | demand-backup/11-delete-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 15:37:13 | demand-backup/11-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 15:37:14 | demand-backup/11-delete-data | ++ : logger.go:42: 15:37:14 | demand-backup/11-delete-data | + data= logger.go:42: 15:37:14 | demand-backup/11-delete-data | + kubectl create configmap -n kuttl-test-fancy-weevil 12-delete-data-backup-source-1 --from-literal=data= logger.go:42: 15:37:14 | demand-backup/11-delete-data | configmap/12-delete-data-backup-source-1 created logger.go:42: 15:37:14 | demand-backup/11-delete-data | + for i in 0 1 2 logger.go:42: 15:37:14 | demand-backup/11-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-2.demand-backup-mysql' logger.go:42: 15:37:14 | demand-backup/11-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 15:37:14 | demand-backup/11-delete-data | ++ local 'host=-h demand-backup-mysql-2.demand-backup-mysql' logger.go:42: 15:37:14 | demand-backup/11-delete-data | +++ get_user_pass root logger.go:42: 15:37:14 | demand-backup/11-delete-data | +++ local user=root logger.go:42: 15:37:14 | demand-backup/11-delete-data | ++++ get_cluster_name logger.go:42: 15:37:14 | demand-backup/11-delete-data | ++++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:37:15 | demand-backup/11-delete-data | +++ local secret=demand-backup-secrets logger.go:42: 15:37:15 | demand-backup/11-delete-data | +++ base64 --decode logger.go:42: 15:37:15 | demand-backup/11-delete-data | +++ kubectl -n kuttl-test-fancy-weevil get secret demand-backup-secrets -o 'jsonpath={.data.root}' logger.go:42: 15:37:15 | demand-backup/11-delete-data | ++ local 'user=-uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:37:15 | demand-backup/11-delete-data | ++ local pod= logger.go:42: 15:37:15 | demand-backup/11-delete-data | +++ get_client_pod logger.go:42: 15:37:15 | demand-backup/11-delete-data | +++ kubectl -n kuttl-test-fancy-weevil get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:37:16 | demand-backup/11-delete-data | ++ client_pod=mysql-client logger.go:42: 15:37:16 | demand-backup/11-delete-data | ++ wait_pod mysql-client logger.go:42: 15:37:16 | demand-backup/11-delete-data | ++ local pod=mysql-client logger.go:42: 15:37:16 | demand-backup/11-delete-data | ++ local ns=kuttl-test-fancy-weevil logger.go:42: 15:37:16 | demand-backup/11-delete-data | ++ set +o xtrace logger.go:42: 15:37:16 | demand-backup/11-delete-data | mysql-clienttrue logger.go:42: 15:37:16 | demand-backup/11-delete-data | ++ kubectl -n kuttl-test-fancy-weevil exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-2.demand-backup-mysql -uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:37:16 | demand-backup/11-delete-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 15:37:16 | demand-backup/11-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
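
A naming quirk worth flagging: although this runs as step 11-delete-data, the ConfigMaps it writes are named 12-delete-data-backup-source-N, pre-staging the "replicas were empty before the restore" evidence for the backup-source scenario that step 12 starts next. Purely as an illustration (this loop is not in the log), the recorded state reduces to:

# every replica must have been empty before the backup-source restore runs
for i in 0 1 2; do
    val=$(kubectl -n "${NAMESPACE}" get configmap \
        "12-delete-data-backup-source-${i}" -o 'jsonpath={.data.data}')
    [ -z "${val}" ] || { echo "replica ${i} still had data"; exit 1; }
done
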
logger.go:42: 15:37:17 | demand-backup/11-delete-data | ++ : logger.go:42: 15:37:17 | demand-backup/11-delete-data | + data= logger.go:42: 15:37:17 | demand-backup/11-delete-data | + kubectl create configmap -n kuttl-test-fancy-weevil 12-delete-data-backup-source-2 --from-literal=data= logger.go:42: 15:37:18 | demand-backup/11-delete-data | configmap/12-delete-data-backup-source-2 created logger.go:42: 15:37:19 | demand-backup/11-delete-data | test step completed 11-delete-data logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | starting test step 12-restore-from-backup-source logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | running command: [sh -c set -o errexit set -o xtrace source ../../functions backup_name="demand-backup" restore_name="restore-of-demand-backup-with-backup-source" cluster_name=$(get_cluster_name) destination=$(kubectl -n "${NAMESPACE}" get ps-backup "${backup_name}" -o jsonpath='{.status.destination}') run_restore $restore_name "" $cluster_name "backup-source" $destination] logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | + source ../../functions logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | +++ realpath ../../.. logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | ++++ pwd logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | ++ test_name=demand-backup logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/vars.sh logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | +++ export TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | +++ TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 15:37:19 | 
demand-backup/12-restore-from-backup-source | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | + backup_name=demand-backup logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | + restore_name=restore-of-demand-backup-with-backup-source logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | ++ get_cluster_name logger.go:42: 15:37:19 | demand-backup/12-restore-from-backup-source | ++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:37:20 | demand-backup/12-restore-from-backup-source | + cluster_name=demand-backup logger.go:42: 15:37:20 | demand-backup/12-restore-from-backup-source | ++ kubectl -n kuttl-test-fancy-weevil get ps-backup demand-backup -o 'jsonpath={.status.destination}' logger.go:42: 15:37:20 | demand-backup/12-restore-from-backup-source | +
destination=s3://operator-testing/ps/demand-backup/demand-backup-2026-05-05-15:32:01-full logger.go:42: 15:37:20 | demand-backup/12-restore-from-backup-source | + run_restore restore-of-demand-backup-with-backup-source '' demand-backup backup-source s3://operator-testing/ps/demand-backup/demand-backup-2026-05-05-15:32:01-full logger.go:42: 15:37:20 | demand-backup/12-restore-from-backup-source | + local restore_name=restore-of-demand-backup-with-backup-source logger.go:42: 15:37:20 | demand-backup/12-restore-from-backup-source | + local backup_name= logger.go:42: 15:37:20 | demand-backup/12-restore-from-backup-source | + local cluster_name=demand-backup logger.go:42: 15:37:20 | demand-backup/12-restore-from-backup-source | + local prefix=-backup-source logger.go:42: 15:37:20 | demand-backup/12-restore-from-backup-source | + local destination=s3://operator-testing/ps/demand-backup/demand-backup-2026-05-05-15:32:01-full logger.go:42: 15:37:20 | demand-backup/12-restore-from-backup-source | + local pitr_date= logger.go:42: 15:37:20 | demand-backup/12-restore-from-backup-source | ++ detect_k8s_platform logger.go:42: 15:37:20 | demand-backup/12-restore-from-backup-source | ++ set +x logger.go:42: 15:37:21 | demand-backup/12-restore-from-backup-source | ++ echo gke logger.go:42: 15:37:21 | demand-backup/12-restore-from-backup-source | + local platform=gke logger.go:42: 15:37:21 | demand-backup/12-restore-from-backup-source | ++ get_storage_alias gke logger.go:42: 15:37:21 | demand-backup/12-restore-from-backup-source | ++ set +x logger.go:42: 15:37:21 | demand-backup/12-restore-from-backup-source | ++ echo default logger.go:42: 15:37:21 | demand-backup/12-restore-from-backup-source | + local storage_alias=default logger.go:42: 15:37:21 | demand-backup/12-restore-from-backup-source | ++ get_platform_alias gke logger.go:42: 15:37:21 | demand-backup/12-restore-from-backup-source | ++ set +x logger.go:42: 15:37:21 | demand-backup/12-restore-from-backup-source | ++ echo gke logger.go:42: 15:37:21 | demand-backup/12-restore-from-backup-source | + local platform_alias=gke logger.go:42: 15:37:21 | demand-backup/12-restore-from-backup-source | + local restore_template=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/backup/restore-backup-source.yaml logger.go:42: 15:37:21 | demand-backup/12-restore-from-backup-source | + local restore_template_storage=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/backup/restore-backup-source-default.yaml logger.go:42: 15:37:21 | demand-backup/12-restore-from-backup-source | + local restore_template_platform=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/backup/restore-backup-source-gke.yaml logger.go:42: 15:37:21 | demand-backup/12-restore-from-backup-source | + [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/backup/restore-backup-source-default.yaml ]] logger.go:42: 15:37:21 | demand-backup/12-restore-from-backup-source | + [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/backup/restore-backup-source-gke.yaml ]] logger.go:42: 15:37:21 | demand-backup/12-restore-from-backup-source | + [[ ! 
-f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/backup/restore-backup-source.yaml ]] logger.go:42: 15:37:21 | demand-backup/12-restore-from-backup-source | + echo 'Running restore '\''restore-of-demand-backup-with-backup-source'\'' from backup '\'''\'' using '\''/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/backup/restore-backup-source.yaml'\''' logger.go:42: 15:37:21 | demand-backup/12-restore-from-backup-source | Running restore 'restore-of-demand-backup-with-backup-source' from backup '' using '/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/backup/restore-backup-source.yaml' logger.go:42: 15:37:21 | demand-backup/12-restore-from-backup-source | + yq eval $'\n\t\t.metadata.name = "restore-of-demand-backup-with-backup-source" |\n\t\t.spec.backupName = "" |\n\t\t.spec.clusterName = "demand-backup" |\n\t\t(.. | select(tag == "!!str")) |= sub(""; "") |\n\t\t(.. | select(tag == "!!str")) |= sub(""; "s3://operator-testing/ps/demand-backup/demand-backup-2026-05-05-15:32:01-full") |\n\t\t(.. | select(tag == "!!str")) |= sub(""; "kuttl-test-fancy-weevil")\n\t' /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/backup/restore-backup-source.yaml logger.go:42: 15:37:21 | demand-backup/12-restore-from-backup-source | + kubectl apply -n kuttl-test-fancy-weevil -f - logger.go:42: 15:37:22 | demand-backup/12-restore-from-backup-source | perconaservermysqlrestore.ps.percona.com/restore-of-demand-backup-with-backup-source created logger.go:42: 15:41:31 | demand-backup/12-restore-from-backup-source | test step completed 12-restore-from-backup-source logger.go:42: 15:41:31 | demand-backup/13-read-data | starting test step 13-read-data logger.go:42: 15:41:31 | demand-backup/13-read-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions cluster_name=$(get_cluster_name) for i in 0 1 2; do data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql") kubectl create configmap -n "${NAMESPACE}" 13-read-data-backup-source-${i} --from-literal=data="${data}" done] logger.go:42: 15:41:31 | demand-backup/13-read-data | + source ../../functions logger.go:42: 15:41:31 | demand-backup/13-read-data | +++ realpath ../../.. 
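The restore that completed just above is driven entirely by the backup's recorded status: the step reads .status.destination from the ps-backup object and renders the restore-backup-source.yaml template with yq before applying it. Stripped of the template's placeholder substitution (elided here), the flow the xtrace shows is:

    destination=$(kubectl -n "${NAMESPACE}" get ps-backup demand-backup \
        -o jsonpath='{.status.destination}')
    # spec.backupName is deliberately blank: the restore is built from the
    # raw S3 destination instead of referencing a PerconaServerMySQLBackup
    yq eval '
        .metadata.name = "restore-of-demand-backup-with-backup-source" |
        .spec.backupName = "" |
        .spec.clusterName = "demand-backup"
    ' "${TEST_CONFIG_DIR}/backup/restore-backup-source.yaml" \
        | kubectl apply -n "${NAMESPACE}" -f -

kuttl then blocks until the PerconaServerMySQLRestore reaches its expected state, roughly four minutes in this run: applied at 15:37:22, step completed at 15:41:31.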
logger.go:42: 15:41:31 | demand-backup/13-read-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:41:31 | demand-backup/13-read-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/vars.sh logger.go:42: 15:41:32 | demand-backup/13-read-data | +++ which gsed logger.go:42: 15:41:32 | demand-backup/13-read-data | which: no gsed in
(/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 15:41:32 | demand-backup/13-read-data | +++ which sed logger.go:42: 15:41:32 | demand-backup/13-read-data | ++ sed=/usr/sbin/sed logger.go:42: 15:41:32 | demand-backup/13-read-data | ++ get_cluster_name logger.go:42: 15:41:32 | demand-backup/13-read-data | ++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:41:32 | demand-backup/13-read-data | + cluster_name=demand-backup logger.go:42: 15:41:32 | demand-backup/13-read-data | + for i in 0 1 2 logger.go:42: 15:41:32 | demand-backup/13-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-0.demand-backup-mysql' logger.go:42: 15:41:32 | demand-backup/13-read-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 15:41:32 | demand-backup/13-read-data | ++ local 'host=-h demand-backup-mysql-0.demand-backup-mysql' logger.go:42: 15:41:32 | demand-backup/13-read-data | +++ get_user_pass root logger.go:42: 15:41:32 | demand-backup/13-read-data | +++ local user=root logger.go:42: 15:41:32 | demand-backup/13-read-data | ++++ get_cluster_name logger.go:42: 15:41:32 | demand-backup/13-read-data | ++++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:41:33 | demand-backup/13-read-data | +++ local secret=demand-backup-secrets logger.go:42: 15:41:33 | demand-backup/13-read-data | +++ kubectl -n kuttl-test-fancy-weevil get secret demand-backup-secrets -o 'jsonpath={.data.root}' logger.go:42: 15:41:33 | demand-backup/13-read-data | +++ base64 --decode logger.go:42: 15:41:34 | demand-backup/13-read-data | ++ local 'user=-uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:41:34 | demand-backup/13-read-data | ++ local pod= logger.go:42: 15:41:34 | demand-backup/13-read-data | +++ get_client_pod logger.go:42: 15:41:34 | demand-backup/13-read-data | +++ kubectl -n kuttl-test-fancy-weevil get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:41:34 | demand-backup/13-read-data | ++ client_pod=mysql-client logger.go:42: 15:41:34 | demand-backup/13-read-data | ++ wait_pod mysql-client logger.go:42: 15:41:34 | demand-backup/13-read-data | ++ local pod=mysql-client logger.go:42: 15:41:34 | demand-backup/13-read-data | ++ local ns=kuttl-test-fancy-weevil logger.go:42: 15:41:34 | demand-backup/13-read-data | ++ set +o xtrace logger.go:42: 15:41:35 | demand-backup/13-read-data | mysql-clienttrue logger.go:42: 15:41:35 | demand-backup/13-read-data | ++ kubectl -n kuttl-test-fancy-weevil exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-0.demand-backup-mysql -uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:41:35 | demand-backup/13-read-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 15:41:35 | demand-backup/13-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
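Every run_mysql call in this log repeats the same credential lookup that just scrolled past: pull the root password out of the cluster's Secret, then pipe the statement into the mysql CLI inside the long-lived mysql-client pod. Condensed, this is a sketch of what the helper's xtrace shows, not its full implementation:

    root_pass=$(kubectl -n "${NAMESPACE}" get secret demand-backup-secrets \
        -o 'jsonpath={.data.root}' | base64 --decode)
    # the sed/grep pair strips mysql's password-on-command-line warning so
    # only the query result is captured
    kubectl -n "${NAMESPACE}" exec mysql-client -- bash -c \
        "printf '%s\n' 'SELECT * FROM myDB.myTable' \
            | mysql -sN -h demand-backup-mysql-0.demand-backup-mysql -uroot -p'${root_pass}'" \
        | sed -e 's/mysql: //' \
        | grep -v 'Using a password on the command line interface can be insecure.'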
logger.go:42: 15:41:36 | demand-backup/13-read-data | + data=100500 logger.go:42: 15:41:36 | demand-backup/13-read-data | + kubectl create configmap -n kuttl-test-fancy-weevil 13-read-data-backup-source-0 --from-literal=data=100500 logger.go:42: 15:41:36 | demand-backup/13-read-data | configmap/13-read-data-backup-source-0 created logger.go:42: 15:41:36 | demand-backup/13-read-data | + for i in 0 1 2 logger.go:42: 15:41:36 | demand-backup/13-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-1.demand-backup-mysql' logger.go:42: 15:41:36 | demand-backup/13-read-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 15:41:36 | demand-backup/13-read-data | ++ local 'host=-h demand-backup-mysql-1.demand-backup-mysql' logger.go:42: 15:41:36 | demand-backup/13-read-data | +++ get_user_pass root logger.go:42: 15:41:36 | demand-backup/13-read-data | +++ local user=root logger.go:42: 15:41:36 | demand-backup/13-read-data | ++++ get_cluster_name logger.go:42: 15:41:36 | demand-backup/13-read-data | ++++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:41:37 | demand-backup/13-read-data | +++ local secret=demand-backup-secrets logger.go:42: 15:41:37 | demand-backup/13-read-data | +++ kubectl -n kuttl-test-fancy-weevil get secret demand-backup-secrets -o 'jsonpath={.data.root}' logger.go:42: 15:41:37 | demand-backup/13-read-data | +++ base64 --decode logger.go:42: 15:41:37 | demand-backup/13-read-data | ++ local 'user=-uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:41:37 | demand-backup/13-read-data | ++ local pod= logger.go:42: 15:41:37 | demand-backup/13-read-data | +++ get_client_pod logger.go:42: 15:41:37 | demand-backup/13-read-data | +++ kubectl -n kuttl-test-fancy-weevil get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:41:37 | demand-backup/13-read-data | ++ client_pod=mysql-client logger.go:42: 15:41:37 | demand-backup/13-read-data | ++ wait_pod mysql-client logger.go:42: 15:41:37 | demand-backup/13-read-data | ++ local pod=mysql-client logger.go:42: 15:41:37 | demand-backup/13-read-data | ++ local ns=kuttl-test-fancy-weevil logger.go:42: 15:41:37 | demand-backup/13-read-data | ++ set +o xtrace logger.go:42: 15:41:38 | demand-backup/13-read-data | mysql-clienttrue logger.go:42: 15:41:38 | demand-backup/13-read-data | ++ kubectl -n kuttl-test-fancy-weevil exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-1.demand-backup-mysql -uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:41:38 | demand-backup/13-read-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 15:41:38 | demand-backup/13-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 15:41:39 | demand-backup/13-read-data | + data=100500 logger.go:42: 15:41:39 | demand-backup/13-read-data | + kubectl create configmap -n kuttl-test-fancy-weevil 13-read-data-backup-source-1 --from-literal=data=100500 logger.go:42: 15:41:39 | demand-backup/13-read-data | configmap/13-read-data-backup-source-1 created logger.go:42: 15:41:39 | demand-backup/13-read-data | + for i in 0 1 2 logger.go:42: 15:41:39 | demand-backup/13-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-2.demand-backup-mysql' logger.go:42: 15:41:39 | demand-backup/13-read-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 15:41:39 | demand-backup/13-read-data | ++ local 'host=-h demand-backup-mysql-2.demand-backup-mysql' logger.go:42: 15:41:39 | demand-backup/13-read-data | +++ get_user_pass root logger.go:42: 15:41:39 | demand-backup/13-read-data | +++ local user=root logger.go:42: 15:41:39 | demand-backup/13-read-data | ++++ get_cluster_name logger.go:42: 15:41:39 | demand-backup/13-read-data | ++++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:41:40 | demand-backup/13-read-data | +++ local secret=demand-backup-secrets logger.go:42: 15:41:40 | demand-backup/13-read-data | +++ kubectl -n kuttl-test-fancy-weevil get secret demand-backup-secrets -o 'jsonpath={.data.root}' logger.go:42: 15:41:40 | demand-backup/13-read-data | +++ base64 --decode logger.go:42: 15:41:40 | demand-backup/13-read-data | ++ local 'user=-uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:41:40 | demand-backup/13-read-data | ++ local pod= logger.go:42: 15:41:40 | demand-backup/13-read-data | +++ get_client_pod logger.go:42: 15:41:40 | demand-backup/13-read-data | +++ kubectl -n kuttl-test-fancy-weevil get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:41:41 | demand-backup/13-read-data | ++ client_pod=mysql-client logger.go:42: 15:41:41 | demand-backup/13-read-data | ++ wait_pod mysql-client logger.go:42: 15:41:41 | demand-backup/13-read-data | ++ local pod=mysql-client logger.go:42: 15:41:41 | demand-backup/13-read-data | ++ local ns=kuttl-test-fancy-weevil logger.go:42: 15:41:41 | demand-backup/13-read-data | ++ set +o xtrace logger.go:42: 15:41:41 | demand-backup/13-read-data | mysql-clienttrue logger.go:42: 15:41:41 | demand-backup/13-read-data | ++ kubectl -n kuttl-test-fancy-weevil exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-2.demand-backup-mysql -uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:41:41 | demand-backup/13-read-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 15:41:41 | demand-backup/13-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
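Once the three reads complete (the result for mysql-2 follows below), 13-read-data has written 100500 into a ConfigMap per replica, showing the backup-source restore brought back exactly the row inserted before the data was deleted. The suite's own assertion lives in the step's kuttl assert file; an equivalent ad-hoc check (hypothetical, not part of the suite) would be:

    for i in 0 1 2; do
        got=$(kubectl -n "${NAMESPACE}" get configmap "13-read-data-backup-source-${i}" \
            -o 'jsonpath={.data.data}')
        [ "${got}" = "100500" ] || { echo "replica ${i} mismatch: ${got}"; exit 1; }
    done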
logger.go:42: 15:41:42 | demand-backup/13-read-data | + data=100500 logger.go:42: 15:41:42 | demand-backup/13-read-data | + kubectl create configmap -n kuttl-test-fancy-weevil 13-read-data-backup-source-2 --from-literal=data=100500 logger.go:42: 15:41:42 | demand-backup/13-read-data | configmap/13-read-data-backup-source-2 created logger.go:42: 15:41:43 | demand-backup/13-read-data | test step completed 13-read-data logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | starting test step 14-create-backup-failure logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | running command: [sh -c set -o errexit set -o xtrace source ../../functions # Gets first storage defined and run backup cluster_name=$(get_cluster_name) storage_name=$(kubectl get ps $cluster_name -n ${NAMESPACE} -o yaml | yq '(.spec.backup.storages // {}) | keys | .[0]') run_backup demand-backup-fail $storage_name $cluster_name "failure" ] logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | + source ../../functions logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | +++ realpath ../../.. logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | ++++ pwd logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | ++ test_name=demand-backup logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/vars.sh logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | +++ export TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | +++ TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 15:41:43 | demand-backup/14-create-backup-failure | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:41:43 | 
demand-backup/14-create-backup-failure | +++ export GIT_BRANCH=PR-1238 logger.go:42: 15:41:44 | demand-backup/14-create-backup-failure | ++ get_cluster_name logger.go:42: 15:41:44 | demand-backup/14-create-backup-failure | ++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:41:44 | demand-backup/14-create-backup-failure | + cluster_name=demand-backup logger.go:42: 15:41:44 | demand-backup/14-create-backup-failure | ++ kubectl get ps demand-backup -n kuttl-test-fancy-weevil -o yaml logger.go:42: 15:41:44 | demand-backup/14-create-backup-failure | ++ yq '(.spec.backup.storages // {}) | keys | .[0]' logger.go:42: 15:41:45 | demand-backup/14-create-backup-failure | + storage_name=minio logger.go:42: 15:41:45 | demand-backup/14-create-backup-failure | + run_backup demand-backup-fail minio demand-backup failure logger.go:42: 15:41:45 | demand-backup/14-create-backup-failure | + local backup_name=demand-backup-fail logger.go:42: 15:41:45 | demand-backup/14-create-backup-failure | + local storage_name=minio logger.go:42: 15:41:45 | demand-backup/14-create-backup-failure | + local cluster_name=demand-backup logger.go:42: 15:41:45 | demand-backup/14-create-backup-failure | + local prefix=-failure logger.go:42: 15:41:45 | demand-backup/14-create-backup-failure | + local
backup_template=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy/backup/backup.yaml logger.go:42: 15:41:45 | demand-backup/14-create-backup-failure | + test_backup_template=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/backup/backup-failure.yaml logger.go:42: 15:41:45 | demand-backup/14-create-backup-failure | + [[ -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/backup/backup-failure.yaml ]] logger.go:42: 15:41:45 | demand-backup/14-create-backup-failure | + backup_template=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/backup/backup-failure.yaml logger.go:42: 15:41:45 | demand-backup/14-create-backup-failure | + echo 'Running backup demand-backup-fail using storage minio' logger.go:42: 15:41:45 | demand-backup/14-create-backup-failure | Running backup demand-backup-fail using storage minio logger.go:42: 15:41:45 | demand-backup/14-create-backup-failure | + yq eval $'\n\t\t.metadata.name = "demand-backup-fail" |\n\t\t.spec.storageName = "minio" |\n\t\t.spec.clusterName = "demand-backup"\n\t' /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf/backup/backup-failure.yaml logger.go:42: 15:41:45 | demand-backup/14-create-backup-failure | + kubectl apply -n kuttl-test-fancy-weevil -f - logger.go:42: 15:41:46 | demand-backup/14-create-backup-failure | perconaservermysqlbackup.ps.percona.com/demand-backup-fail created Warning: unknown field "status.status" logger.go:42: 15:41:47 | demand-backup/14-create-backup-failure | PerconaServerMySQL:kuttl-test-fancy-weevil/demand-backup updated logger.go:42: 15:41:48 | demand-backup/14-create-backup-failure | running command: [sh -c set -e # Use orchestrator-client to check that source backup pod is in downtimed state during backup backup_name="demand-backup-fail" backup_source_pod=$(kubectl get ps-backup -n ${NAMESPACE} $backup_name -o jsonpath='{.status.backupSource}' | awk -F'.' '{print $1}') source_downtimed=$(kubectl exec demand-backup-orc-0 -n ${NAMESPACE} -c orchestrator -- bash -c 'orchestrator-client -c topology -i $(orchestrator-client -c clusters)' | grep $backup_source_pod | grep -c 'downtimed') if [[ $source_downtimed != 1 ]]; then echo "Downtime did not start!" echo $source_downtimed exit 1 fi] logger.go:42: 15:41:50 | demand-backup/14-create-backup-failure | test step completed 14-create-backup-failure logger.go:42: 15:41:50 | demand-backup/15- | starting test step 15- logger.go:42: 15:43:19 | demand-backup/15- | test step completed 15- logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | starting test step 16-check-failure-unsupported-var logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | running command: [sh -c set -o errexit set -o xtrace source ../../functions # Check that unsupported arg was passed to backup. backup_name="demand-backup-fail" backup_source_pod=$(kubectl get ps-backup -n ${NAMESPACE} $backup_name -o jsonpath='{.status.backupSource}' | awk -F'.' '{print $1}') # Logs of unsuccessful backups are stored only on the backup source pod. Use it to check logs. 
xtrabackup_flag_count=$(kubectl logs -n ${NAMESPACE} $backup_source_pod -c xtrabackup | grep -c -- "--customnotsupported") if [ "$xtrabackup_flag_count" -eq 0 ]; then echo "custom flag --customnotsupported was provided to the backup but it's not mentioned in the logs" exit 1 fi] logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | + source ../../functions logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | +++ realpath ../../.. logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | ++++ pwd logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | ++ test_name=demand-backup logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/vars.sh logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | +++ export TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | +++ TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | +++ export GIT_BRANCH=PR-1238 logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | +++ GIT_BRANCH=PR-1238 logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | +++ export VERSION=PR-1238-7677a7b6 logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | +++ VERSION=PR-1238-7677a7b6 logger.go:42: 15:43:19 | demand-backup/16-check-failure-unsupported-var | +++ export 
IMAGE=perconalab/percona-server-mysql-operator:PR-1238-7677a7b6 logger.go:42: 15:43:20 | demand-backup/16-check-failure-unsupported-var | + backup_name=demand-backup-fail logger.go:42: 15:43:20 | demand-backup/16-check-failure-unsupported-var | ++ kubectl get ps-backup -n kuttl-test-fancy-weevil demand-backup-fail -o 'jsonpath={.status.backupSource}' logger.go:42: 15:43:20 | demand-backup/16-check-failure-unsupported-var | ++ awk -F. '{print $1}' logger.go:42: 15:43:20 | demand-backup/16-check-failure-unsupported-var | + backup_source_pod=demand-backup-mysql-1 logger.go:42: 15:43:20 | demand-backup/16-check-failure-unsupported-var | ++ kubectl logs -n kuttl-test-fancy-weevil demand-backup-mysql-1 -c xtrabackup logger.go:42: 15:43:20 | demand-backup/16-check-failure-unsupported-var | ++ grep -c -- --customnotsupported logger.go:42: 15:43:21 | demand-backup/16-check-failure-unsupported-var | + xtrabackup_flag_count=8 logger.go:42: 15:43:21 | demand-backup/16-check-failure-unsupported-var | + '[' 8 -eq 0 ']' logger.go:42: 15:43:21 | demand-backup/16-check-failure-unsupported-var | running command: [sh -c # Use orchestrator-client to check that downtime for source node finished after the backup failed. backup_name="demand-backup-fail" backup_source_pod=$(kubectl get ps-backup -n ${NAMESPACE} $backup_name -o jsonpath='{.status.backupSource}' | awk -F'.' 
'{print $1}') source_downtimed=$(kubectl exec demand-backup-orc-0 -n ${NAMESPACE} -c orchestrator -- bash -c 'orchestrator-client -c topology -i $(orchestrator-client -c clusters)' | grep -c 'downtimed') if [[ $source_downtimed != 0 ]]; then echo "Downtime did not finish!" exit 1 fi] logger.go:42: 15:43:22 | demand-backup/16-check-failure-unsupported-var | test step completed 16-check-failure-unsupported-var logger.go:42: 15:43:22 | demand-backup/17-write-data | starting test step 17-write-data logger.go:42: 15:43:22 | demand-backup/17-write-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_mysql \ "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" \ "-h $(get_haproxy_svc $(get_cluster_name))" run_mysql \ "INSERT myDB.myTable (id) VALUES (100501)" \ "-h $(get_haproxy_svc $(get_cluster_name))"] logger.go:42: 15:43:22 | demand-backup/17-write-data | + source ../../functions logger.go:42: 15:43:22 | demand-backup/17-write-data | +++ realpath ../../.. logger.go:42: 15:43:22 | demand-backup/17-write-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:43:22 | demand-backup/17-write-data | ++++ pwd logger.go:42: 15:43:22 | demand-backup/17-write-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup logger.go:42: 15:43:22 | demand-backup/17-write-data | ++ test_name=demand-backup logger.go:42: 15:43:22 | demand-backup/17-write-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/vars.sh logger.go:42: 15:43:22 | demand-backup/17-write-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:43:22 | demand-backup/17-write-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238 logger.go:42: 15:43:22 | demand-backup/17-write-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 15:43:22 | demand-backup/17-write-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/deploy logger.go:42: 15:43:22 | demand-backup/17-write-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 15:43:22 | demand-backup/17-write-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests logger.go:42: 15:43:22 | demand-backup/17-write-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 15:43:22 | demand-backup/17-write-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/conf logger.go:42: 15:43:22 | demand-backup/17-write-data | +++ export TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf logger.go:42: 15:43:22 | demand-backup/17-write-data | +++ TEST_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/e2e-tests/tests/demand-backup/conf logger.go:42: 15:43:22 | demand-backup/17-write-data | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 15:43:22 | demand-backup/17-write-data | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 15:43:22 | demand-backup/17-write-data | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:43:22 | demand-backup/17-write-data | +++ export GIT_BRANCH=PR-1238 logger.go:42: 15:43:22 | demand-backup/17-write-data | +++ GIT_BRANCH=PR-1238 logger.go:42: 15:43:22 | demand-backup/17-write-data | +++ export VERSION=PR-1238-7677a7b6 logger.go:42: 15:43:22 | 
demand-backup/17-write-data | +++ VERSION=PR-1238-7677a7b6 logger.go:42: 15:43:23 | demand-backup/17-write-data | +++ get_cluster_name logger.go:42: 15:43:23 | demand-backup/17-write-data | +++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:43:23 | demand-backup/17-write-data | ++ get_haproxy_svc demand-backup logger.go:42: 15:43:23 | demand-backup/17-write-data | ++ local cluster=demand-backup logger.go:42: 15:43:23 | demand-backup/17-write-data | ++ echo demand-backup-haproxy logger.go:42: 15:43:23 | demand-backup/17-write-data | + run_mysql 'CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' '-h demand-backup-haproxy' logger.go:42: 15:43:23 | demand-backup/17-write-data | + local 'command=CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' logger.go:42: 15:43:23 | demand-backup/17-write-data | + local 'host=-h demand-backup-haproxy' logger.go:42: 15:43:23 | demand-backup/17-write-data | ++ get_user_pass root logger.go:42: 15:43:23 | demand-backup/17-write-data | ++ local user=root logger.go:42: 15:43:23 | demand-backup/17-write-data | +++ get_cluster_name logger.go:42: 15:43:23 | demand-backup/17-write-data | +++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:43:24 | demand-backup/17-write-data | ++ local secret=demand-backup-secrets logger.go:42: 15:43:24 | demand-backup/17-write-data | ++ kubectl -n kuttl-test-fancy-weevil get secret demand-backup-secrets -o 'jsonpath={.data.root}' logger.go:42: 15:43:24 | demand-backup/17-write-data | ++ base64 --decode logger.go:42: 15:43:24 | demand-backup/17-write-data | + local 'user=-uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:43:24 | demand-backup/17-write-data | + local pod= logger.go:42: 15:43:24 | demand-backup/17-write-data | ++ get_client_pod logger.go:42: 15:43:24 | demand-backup/17-write-data | ++ kubectl -n kuttl-test-fancy-weevil get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:43:25 | demand-backup/17-write-data | + client_pod=mysql-client logger.go:42: 15:43:25 |
demand-backup/17-write-data | + wait_pod mysql-client logger.go:42: 15:43:25 | demand-backup/17-write-data | + local pod=mysql-client logger.go:42: 15:43:25 | demand-backup/17-write-data | + local ns=kuttl-test-fancy-weevil logger.go:42: 15:43:25 | demand-backup/17-write-data | + set +o xtrace logger.go:42: 15:43:25 | demand-backup/17-write-data | mysql-clienttrue logger.go:42: 15:43:25 | demand-backup/17-write-data | + kubectl -n kuttl-test-fancy-weevil exec mysql-client -- bash -c 'printf '\''%s\n'\'' "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" | mysql -sN -h demand-backup-haproxy -uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:43:25 | demand-backup/17-write-data | + /usr/sbin/sed -e 's/mysql: //' logger.go:42: 15:43:25 | demand-backup/17-write-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 15:43:26 | demand-backup/17-write-data | + : logger.go:42: 15:43:26 | demand-backup/17-write-data | +++ get_cluster_name logger.go:42: 15:43:26 | demand-backup/17-write-data | +++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:43:27 | demand-backup/17-write-data | ++ get_haproxy_svc demand-backup logger.go:42: 15:43:27 | demand-backup/17-write-data | ++ local cluster=demand-backup logger.go:42: 15:43:27 | demand-backup/17-write-data | ++ echo demand-backup-haproxy logger.go:42: 15:43:27 | demand-backup/17-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100501)' '-h demand-backup-haproxy' logger.go:42: 15:43:27 | demand-backup/17-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100501)' logger.go:42: 15:43:27 | demand-backup/17-write-data | + local 'host=-h demand-backup-haproxy' logger.go:42: 15:43:27 | demand-backup/17-write-data | ++ get_user_pass root logger.go:42: 15:43:27 | demand-backup/17-write-data | ++ local user=root logger.go:42: 15:43:27 | demand-backup/17-write-data | +++ get_cluster_name logger.go:42: 15:43:27 | demand-backup/17-write-data | +++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:43:27 | demand-backup/17-write-data | ++ local secret=demand-backup-secrets logger.go:42: 15:43:27 | demand-backup/17-write-data | ++ kubectl -n kuttl-test-fancy-weevil get secret demand-backup-secrets -o 'jsonpath={.data.root}' logger.go:42: 15:43:27 | demand-backup/17-write-data | ++ base64 --decode logger.go:42: 15:43:28 | demand-backup/17-write-data | + local 'user=-uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:43:28 | demand-backup/17-write-data | + local pod= logger.go:42: 15:43:28 | demand-backup/17-write-data | ++ get_client_pod logger.go:42: 15:43:28 | demand-backup/17-write-data | ++ kubectl -n kuttl-test-fancy-weevil get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:43:28 | demand-backup/17-write-data | + client_pod=mysql-client logger.go:42: 15:43:28 | demand-backup/17-write-data | + wait_pod mysql-client logger.go:42: 15:43:28 | demand-backup/17-write-data | + local pod=mysql-client logger.go:42: 15:43:28 | demand-backup/17-write-data | + local ns=kuttl-test-fancy-weevil logger.go:42: 15:43:28 | demand-backup/17-write-data | + set +o xtrace logger.go:42: 15:43:29 | demand-backup/17-write-data | mysql-clienttrue logger.go:42: 15:43:29 | demand-backup/17-write-data | + kubectl -n kuttl-test-fancy-weevil exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100501)" | mysql 
-sN -h demand-backup-haproxy -uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:43:29 | demand-backup/17-write-data | + /usr/sbin/sed -e 's/mysql: //' logger.go:42: 15:43:29 | demand-backup/17-write-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 15:43:29 | demand-backup/17-write-data | + : logger.go:42: 15:43:29 | demand-backup/17-write-data | test step completed 17-write-data
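The xtrace above spells out what run_mysql does on every call: resolve the cluster name from the ps custom resource, pull the root password out of the <cluster>-secrets Secret, pipe the statement into mysql inside the mysql-client pod, and filter the password warning out of the output. Reconstructed as a sketch (not the canonical helper from ../../functions; the trailing "+ :" in the trace suggests the result is discarded with "|| :" so that empty output does not trip errexit):

# Reconstruction of run_mysql from the xtrace above.
run_mysql() {
    local command="$1"    # e.g. "INSERT myDB.myTable (id) VALUES (100501)"
    local host="$2"       # e.g. "-h demand-backup-haproxy"
    local pass
    # Root credentials are stored base64-encoded in <cluster>-secrets.
    pass=$(kubectl -n "${NAMESPACE}" get secret "$(get_cluster_name)-secrets" \
        -o 'jsonpath={.data.root}' | base64 --decode)
    # -sN: silent, no column headers, so callers get raw rows they can assert on.
    kubectl -n "${NAMESPACE}" exec mysql-client -- \
        bash -c "printf '%s\n' \"${command}\" | mysql -sN ${host} -uroot -p'${pass}'" \
        | sed -e 's/mysql: //' \
        | grep -v 'Using a password on the command line interface can be insecure.' || :
}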
logger.go:42: 15:43:29 | demand-backup/18-read-data | starting test step 18-read-data logger.go:42: 15:43:29 | demand-backup/18-read-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions cluster_name=$(get_cluster_name) for i in 0 1 2; do data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql") kubectl create configmap -n "${NAMESPACE}" 18-read-data-fail-${i} --from-literal=data="${data}" done] logger.go:42: 15:43:29 | demand-backup/18-read-data | + source ../../functions logger.go:42: 15:43:29 | demand-backup/18-read-data | +++ [vars.sh export trace elided - identical to the 0-deploy-operator step] logger.go:42:
15:43:29 | demand-backup/18-read-data | +++ VAULT_VER=0.16.1 logger.go:42: 15:43:29 | demand-backup/18-read-data | ++++ which gdate logger.go:42: 15:43:29 | demand-backup/18-read-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 15:43:29 | demand-backup/18-read-data | ++++ which date logger.go:42: 15:43:29 | demand-backup/18-read-data | +++ export date=/usr/sbin/date logger.go:42: 15:43:29 | demand-backup/18-read-data | +++ date=/usr/sbin/date logger.go:42: 15:43:29 | demand-backup/18-read-data | +++ oc get projects logger.go:42: 15:43:29 | demand-backup/18-read-data | +++ : logger.go:42: 15:43:29 | demand-backup/18-read-data | +++ kubectl get nodes logger.go:42: 15:43:29 | demand-backup/18-read-data | +++ grep '^minikube' logger.go:42: 15:43:30 | demand-backup/18-read-data | +++ which gsed logger.go:42: 15:43:30 | demand-backup/18-read-data | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 15:43:30 | demand-backup/18-read-data | +++ which sed logger.go:42: 15:43:30 | demand-backup/18-read-data | ++ sed=/usr/sbin/sed logger.go:42: 15:43:30 | demand-backup/18-read-data | ++ get_cluster_name logger.go:42: 15:43:30 | demand-backup/18-read-data | ++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:43:31 | demand-backup/18-read-data | + cluster_name=demand-backup logger.go:42: 15:43:31 | demand-backup/18-read-data | + for i in 0 1 2 logger.go:42: 15:43:31 | demand-backup/18-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-0.demand-backup-mysql' logger.go:42: 15:43:31 | demand-backup/18-read-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 15:43:31 | demand-backup/18-read-data | ++ local 'host=-h demand-backup-mysql-0.demand-backup-mysql' logger.go:42: 15:43:31 | demand-backup/18-read-data | +++ get_user_pass root logger.go:42: 15:43:31 | demand-backup/18-read-data | +++ local user=root logger.go:42: 15:43:31 | demand-backup/18-read-data | ++++ get_cluster_name logger.go:42: 15:43:31 | demand-backup/18-read-data | ++++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:43:31 | demand-backup/18-read-data | +++ local secret=demand-backup-secrets logger.go:42: 15:43:31 | demand-backup/18-read-data | +++ kubectl -n kuttl-test-fancy-weevil get secret demand-backup-secrets -o 'jsonpath={.data.root}' logger.go:42: 15:43:31 | demand-backup/18-read-data | +++ base64 --decode logger.go:42: 15:43:32 | demand-backup/18-read-data | ++ local 'user=-uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:43:32 | demand-backup/18-read-data | ++ local pod= logger.go:42: 15:43:32 | demand-backup/18-read-data | +++ get_client_pod logger.go:42: 15:43:32 | demand-backup/18-read-data | +++ kubectl -n kuttl-test-fancy-weevil get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:43:32 | demand-backup/18-read-data | ++ client_pod=mysql-client logger.go:42: 15:43:32 | demand-backup/18-read-data | ++ wait_pod mysql-client logger.go:42: 15:43:32 | demand-backup/18-read-data | ++ local pod=mysql-client logger.go:42: 15:43:32 | demand-backup/18-read-data | ++ local ns=kuttl-test-fancy-weevil logger.go:42: 15:43:32 | demand-backup/18-read-data | ++ set +o xtrace 
logger.go:42: 15:43:33 | demand-backup/18-read-data | mysql-clienttrue logger.go:42: 15:43:33 | demand-backup/18-read-data | ++ kubectl -n kuttl-test-fancy-weevil exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-0.demand-backup-mysql -uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:43:33 | demand-backup/18-read-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 15:43:33 | demand-backup/18-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 15:43:34 | demand-backup/18-read-data | + data=$'100500\n100501' logger.go:42: 15:43:34 | demand-backup/18-read-data | + kubectl create configmap -n kuttl-test-fancy-weevil 18-read-data-fail-0 $'--from-literal=data=100500\n100501' logger.go:42: 15:43:34 | demand-backup/18-read-data | configmap/18-read-data-fail-0 created logger.go:42: 15:43:34 | demand-backup/18-read-data | + for i in 0 1 2 logger.go:42: 15:43:34 | demand-backup/18-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-1.demand-backup-mysql' logger.go:42: 15:43:34 | demand-backup/18-read-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 15:43:34 | demand-backup/18-read-data | ++ local 'host=-h demand-backup-mysql-1.demand-backup-mysql' logger.go:42: 15:43:34 | demand-backup/18-read-data | +++ get_user_pass root logger.go:42: 15:43:34 | demand-backup/18-read-data | +++ local user=root logger.go:42: 15:43:34 | demand-backup/18-read-data | ++++ get_cluster_name logger.go:42: 15:43:34 | demand-backup/18-read-data | ++++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:43:35 | demand-backup/18-read-data | +++ local secret=demand-backup-secrets logger.go:42: 15:43:35 | demand-backup/18-read-data | +++ base64 --decode logger.go:42: 15:43:35 | demand-backup/18-read-data | +++ kubectl -n kuttl-test-fancy-weevil get secret demand-backup-secrets -o 'jsonpath={.data.root}' logger.go:42: 15:43:35 | demand-backup/18-read-data | ++ local 'user=-uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:43:35 | demand-backup/18-read-data | ++ local pod= logger.go:42: 15:43:35 | demand-backup/18-read-data | +++ get_client_pod logger.go:42: 15:43:35 | demand-backup/18-read-data | +++ kubectl -n kuttl-test-fancy-weevil get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:43:36 | demand-backup/18-read-data | ++ client_pod=mysql-client logger.go:42: 15:43:36 | demand-backup/18-read-data | ++ wait_pod mysql-client logger.go:42: 15:43:36 | demand-backup/18-read-data | ++ local pod=mysql-client logger.go:42: 15:43:36 | demand-backup/18-read-data | ++ local ns=kuttl-test-fancy-weevil logger.go:42: 15:43:36 | demand-backup/18-read-data | ++ set +o xtrace logger.go:42: 15:43:36 | demand-backup/18-read-data | mysql-clienttrue logger.go:42: 15:43:36 | demand-backup/18-read-data | ++ kubectl -n kuttl-test-fancy-weevil exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-1.demand-backup-mysql -uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:43:36 | demand-backup/18-read-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 15:43:36 | demand-backup/18-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 15:43:37 | demand-backup/18-read-data | + data=$'100500\n100501' logger.go:42: 15:43:37 | demand-backup/18-read-data | + kubectl create configmap -n kuttl-test-fancy-weevil 18-read-data-fail-1 $'--from-literal=data=100500\n100501' logger.go:42: 15:43:37 | demand-backup/18-read-data | configmap/18-read-data-fail-1 created logger.go:42: 15:43:37 | demand-backup/18-read-data | + for i in 0 1 2 logger.go:42: 15:43:37 | demand-backup/18-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-2.demand-backup-mysql' logger.go:42: 15:43:37 | demand-backup/18-read-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 15:43:37 | demand-backup/18-read-data | ++ local 'host=-h demand-backup-mysql-2.demand-backup-mysql' logger.go:42: 15:43:37 | demand-backup/18-read-data | +++ get_user_pass root logger.go:42: 15:43:37 | demand-backup/18-read-data | +++ local user=root logger.go:42: 15:43:37 | demand-backup/18-read-data | ++++ get_cluster_name logger.go:42: 15:43:37 | demand-backup/18-read-data | ++++ kubectl -n kuttl-test-fancy-weevil get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:43:38 | demand-backup/18-read-data | +++ local secret=demand-backup-secrets logger.go:42: 15:43:38 | demand-backup/18-read-data | +++ kubectl -n kuttl-test-fancy-weevil get secret demand-backup-secrets -o 'jsonpath={.data.root}' logger.go:42: 15:43:38 | demand-backup/18-read-data | +++ base64 --decode logger.go:42: 15:43:38 | demand-backup/18-read-data | ++ local 'user=-uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:43:38 | demand-backup/18-read-data | ++ local pod= logger.go:42: 15:43:38 | demand-backup/18-read-data | +++ get_client_pod logger.go:42: 15:43:38 | demand-backup/18-read-data | +++ kubectl -n kuttl-test-fancy-weevil get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 15:43:39 | demand-backup/18-read-data | ++ client_pod=mysql-client logger.go:42: 15:43:39 | demand-backup/18-read-data | ++ wait_pod mysql-client logger.go:42: 15:43:39 | demand-backup/18-read-data | ++ local pod=mysql-client logger.go:42: 15:43:39 | demand-backup/18-read-data | ++ local ns=kuttl-test-fancy-weevil logger.go:42: 15:43:39 | demand-backup/18-read-data | ++ set +o xtrace logger.go:42: 15:43:39 | demand-backup/18-read-data | mysql-clienttrue logger.go:42: 15:43:39 | demand-backup/18-read-data | ++ kubectl -n kuttl-test-fancy-weevil exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-2.demand-backup-mysql -uroot -p'\''y&$zcdTi#nBY[lNKJk'\''' logger.go:42: 15:43:39 | demand-backup/18-read-data | ++ /usr/sbin/sed -e 's/mysql: //' logger.go:42: 15:43:39 | demand-backup/18-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 15:43:40 | demand-backup/18-read-data | + data=$'100500\n100501' logger.go:42: 15:43:40 | demand-backup/18-read-data | + kubectl create configmap -n kuttl-test-fancy-weevil 18-read-data-fail-2 $'--from-literal=data=100500\n100501' logger.go:42: 15:43:41 | demand-backup/18-read-data | configmap/18-read-data-fail-2 created logger.go:42: 15:43:42 | demand-backup/18-read-data | test step completed 18-read-data
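Step 18 shows the suite's result-capture pattern: query each replica directly by its stable per-pod DNS name, bypassing haproxy, and persist the rows in a per-replica ConfigMap that a kuttl assert file can compare later. Every replica returns both rows, 100500 (written in an earlier step, before the restore) and 100501 (from step 17), confirming the write reached all three pods. In outline (NAMESPACE is supplied by kuttl):

# The capture loop from step 18; run_mysql and get_cluster_name as above.
cluster_name=$(get_cluster_name)
for i in 0 1 2; do
    data=$(run_mysql "SELECT * FROM myDB.myTable" \
        "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql")
    # --from-literal keeps the embedded newline, hence data=$'100500\n100501'
    # in the trace above.
    kubectl create configmap -n "${NAMESPACE}" "18-read-data-fail-${i}" \
        --from-literal=data="${data}"
done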
logger.go:42: 15:43:42 | demand-backup/19-delete-all-backups | starting test step 19-delete-all-backups logger.go:42: 15:43:42 | demand-backup/19-delete-all-backups | running command: [sh -c set -o errexit source ../../functions verify_all_backups_deletion] logger.go:42: 15:43:42 | demand-backup/19-delete-all-backups | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 15:43:42 | demand-backup/19-delete-all-backups | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1238/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 15:43:43 | demand-backup/19-delete-all-backups | Checking backup: demand-backup logger.go:42: 15:43:43 | demand-backup/19-delete-all-backups | perconaservermysqlbackup.ps.percona.com "demand-backup" deleted from kuttl-test-fancy-weevil namespace logger.go:42: 15:43:49 | demand-backup/19-delete-all-backups | Backup removed: demand-backup logger.go:42: 15:43:49 | demand-backup/19-delete-all-backups | Checking backup: demand-backup-fail logger.go:42: 15:43:50 | demand-backup/19-delete-all-backups | perconaservermysqlbackup.ps.percona.com "demand-backup-fail" deleted from kuttl-test-fancy-weevil namespace logger.go:42: 15:43:56 | demand-backup/19-delete-all-backups | Backup removed: demand-backup-fail logger.go:42: 15:43:56 | demand-backup/19-delete-all-backups | test step completed 19-delete-all-backups
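verify_all_backups_deletion runs without xtrace, so only its progress messages appear in the log. From those messages it plausibly lists the PerconaServerMySQLBackup objects in the namespace, deletes each one, and polls until the delete-backup finalizer has removed the resource entirely; the roughly six-second gap between "deleted" and "Backup removed" would be the finalizer cleaning up the storage artifacts. A hypothetical reconstruction, which may differ from the real helper in ../../functions:

# Hypothetical sketch only, inferred from the log messages above.
# "ps-backup" is assumed here as the short name for
# perconaservermysqlbackup.ps.percona.com.
verify_all_backups_deletion() {
    local backup
    for backup in $(kubectl get ps-backup -n "${NAMESPACE}" \
            -o jsonpath='{.items[*].metadata.name}'); do
        echo "Checking backup: ${backup}"
        kubectl delete ps-backup "${backup}" -n "${NAMESPACE}"
        # Wait until the finalizer lets the object disappear from the API.
        while kubectl get ps-backup "${backup}" -n "${NAMESPACE}" >/dev/null 2>&1; do
            sleep 1
        done
        echo "Backup removed: ${backup}"
    done
}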
logger.go:42: 15:43:56 | demand-backup/97-drop-finalizer | starting test step 97-drop-finalizer logger.go:42: 15:43:56 | demand-backup/97-drop-finalizer | PerconaServerMySQL:kuttl-test-fancy-weevil/demand-backup updated logger.go:42: 15:43:56 | demand-backup/97-drop-finalizer | test step completed 97-drop-finalizer logger.go:42: 15:43:56 | demand-backup/98-remove-cluster-gracefully | starting test step 98-remove-cluster-gracefully logger.go:42: 15:43:57 | demand-backup/98-remove-cluster-gracefully | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_operator] logger.go:42: 15:43:57 | demand-backup/98-remove-cluster-gracefully | + source ../../functions logger.go:42: 15:43:57 | demand-backup/98-remove-cluster-gracefully | +++ [vars.sh export and tool-discovery trace elided - identical to the 0-deploy-operator step] logger.go:42: 15:43:57 | demand-backup/98-remove-cluster-gracefully | + destroy_operator logger.go:42: 15:43:57 | demand-backup/98-remove-cluster-gracefully | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0 logger.go:42: 15:43:57 | demand-backup/98-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 15:43:58 | demand-backup/98-remove-cluster-gracefully | deployment.apps "percona-server-mysql-operator" force deleted from ps-operator namespace logger.go:42: 15:43:58 | demand-backup/98-remove-cluster-gracefully | + [[ -n ps-operator ]] logger.go:42: 15:43:58 | demand-backup/98-remove-cluster-gracefully | + kubectl delete namespace ps-operator --force --grace-period=0 logger.go:42: 15:43:58 | demand-backup/98-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 15:43:59 | demand-backup/98-remove-cluster-gracefully | namespace "ps-operator" force deleted logger.go:42: 15:44:09 | demand-backup/98-remove-cluster-gracefully | test step completed 98-remove-cluster-gracefully
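The teardown trace above shows destroy_operator in full: a forced, zero-grace-period delete of the operator Deployment, then of its namespace when one is set. The literal "ps-operator" in "[[ -n ps-operator ]]" is an already-expanded variable, assumed here to be OPERATOR_NS. --force --grace-period=0 removes the API objects immediately without waiting for the pods to terminate, which is exactly what the two kubectl warnings flag. Reconstructed under that assumption:

# Reconstruction of destroy_operator from the xtrace above; OPERATOR_NS is an
# assumed name for whatever variable expanded to "ps-operator".
destroy_operator() {
    kubectl -n "${OPERATOR_NS}" delete deployment percona-server-mysql-operator \
        --force --grace-period=0
    if [[ -n ${OPERATOR_NS} ]]; then
        kubectl delete namespace "${OPERATOR_NS}" --force --grace-period=0
    fi
}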
logger.go:42: 15:44:09 | demand-backup | demand-backup events from ns kuttl-test-fancy-weevil: logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:09 +0000 UTC Normal Pod mysql-client Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/mysql-client to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-tr00 default-scheduler logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:10 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:15 +0000 UTC Normal PersistentVolumeClaim minio-service WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:16 +0000 UTC Normal ReplicaSet.apps minio-service-649c5b46f8 SuccessfulCreate Created pod: minio-service-649c5b46f8-5fcm6 replicaset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:16 +0000 UTC Normal Deployment.apps minio-service ScalingReplicaSet Scaled up replica set minio-service-649c5b46f8 from 0 to 1 deployment-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:16 +0000 UTC Normal PersistentVolumeClaim minio-service ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:16 +0000 UTC Normal PersistentVolumeClaim minio-service Provisioning External provisioner is provisioning volume for claim "kuttl-test-fancy-weevil/minio-service" pd.csi.storage.gke.io_gke-9eb0b059f9f542b7b564-fff4-87fa-vm_80fdaf8e-6137-4895-9598-b035d9f24e09 logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:17 +0000 UTC Normal Pod minio-service-post-job-cs4ch Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/minio-service-post-job-cs4ch to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-gvts default-scheduler logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:17 +0000 UTC Normal Job.batch minio-service-post-job SuccessfulCreate Created pod: minio-service-post-job-cs4ch job-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:18 +0000 UTC Normal Pod minio-service-post-job-cs4ch.spec.containers{minio-make-user} Pulling Pulling image "quay.io/minio/mc:RELEASE.2024-11-21T17-21-54Z" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:19 +0000 UTC Normal Pod minio-service-post-job-cs4ch.spec.containers{minio-make-user} Pulled Successfully pulled image "quay.io/minio/mc:RELEASE.2024-11-21T17-21-54Z" in 1.892s (1.892s including waiting). Image size: 28122288 bytes.
kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:19 +0000 UTC Normal Pod minio-service-post-job-cs4ch.spec.containers{minio-make-user} Created Created container: minio-make-user kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:19 +0000 UTC Normal Pod minio-service-post-job-cs4ch.spec.containers{minio-make-user} Started Started container minio-make-user kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:20 +0000 UTC Normal Pod minio-service-649c5b46f8-5fcm6 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/minio-service-649c5b46f8-5fcm6 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-tr00 default-scheduler logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:20 +0000 UTC Normal PersistentVolumeClaim minio-service ProvisioningSucceeded Successfully provisioned volume pvc-a5496917-eb62-4818-bd7a-d22e7bdc7537 pd.csi.storage.gke.io_gke-9eb0b059f9f542b7b564-fff4-87fa-vm_80fdaf8e-6137-4895-9598-b035d9f24e09 logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:27 +0000 UTC Normal Pod minio-service-649c5b46f8-5fcm6 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-a5496917-eb62-4818-bd7a-d22e7bdc7537" attachdetach-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:29 +0000 UTC Normal Pod minio-service-649c5b46f8-5fcm6.spec.containers{minio} Pulling Pulling image "quay.io/minio/minio:RELEASE.2024-12-18T13-15-44Z" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:29 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 18.733s (18.733s including waiting). Image size: 435034735 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:29 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container: mysql-client kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:29 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:33 +0000 UTC Normal Pod minio-service-649c5b46f8-5fcm6.spec.containers{minio} Pulled Successfully pulled image "quay.io/minio/minio:RELEASE.2024-12-18T13-15-44Z" in 4.698s (4.698s including waiting). Image size: 62642371 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:34 +0000 UTC Normal Pod minio-service-649c5b46f8-5fcm6.spec.containers{minio} Created Created container: minio kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:34 +0000 UTC Normal Pod minio-service-649c5b46f8-5fcm6.spec.containers{minio} Started Started container minio kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:42 +0000 UTC Normal Job.batch minio-service-post-job Completed Job completed job-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:44 +0000 UTC Normal Pod aws-cli Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/aws-cli to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-tr00 default-scheduler logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:44 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Pulling Pulling image "perconalab/awscli" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:47 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Pulled Successfully pulled image "perconalab/awscli" in 2.433s (2.433s including waiting). Image size: 30314917 bytes. 
kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:47 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Created Created container: aws-cli kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:25:47 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Started Started container aws-cli kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:00 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:00 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:00 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-fancy-weevil/datadir-demand-backup-mysql-0" pd.csi.storage.gke.io_gke-9eb0b059f9f542b7b564-fff4-87fa-vm_80fdaf8e-6137-4895-9598-b035d9f24e09 logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:00 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulCreate create Claim datadir-demand-backup-mysql-0 Pod demand-backup-mysql-0 in StatefulSet demand-backup-mysql success statefulset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:00 +0000 UTC Normal PodDisruptionBudget.policy demand-backup-mysql NoPods No matching pods found controllermanager logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:00 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulCreate create Pod demand-backup-mysql-0 in StatefulSet demand-backup-mysql successful statefulset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:01 +0000 UTC Normal Pod demand-backup-orc-0 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-orc-0 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-tr00 default-scheduler logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:01 +0000 UTC Warning Pod demand-backup-orc-0 FailedMount MountVolume.SetUp failed for volume "users" : secret "internal-demand-backup" not found kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:01 +0000 UTC Normal StatefulSet.apps demand-backup-orc SuccessfulCreate create Pod demand-backup-orc-0 in StatefulSet demand-backup-orc successful statefulset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:01 +0000 UTC Normal PodDisruptionBudget.policy demand-backup-orchestrator NoPods No matching pods found controllermanager logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:01 +0000 UTC Normal PerconaServerMySQL.ps.percona.com demand-backup ClusterStateChanged -> Initializing ps-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:02 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:02 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 130ms (130ms including waiting). 
Image size: 124972284 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:02 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:02 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:04 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:05 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-ff7d8f30-bbb0-42e4-aac9-9a0d9a668c24 pd.csi.storage.gke.io_gke-9eb0b059f9f542b7b564-fff4-87fa-vm_80fdaf8e-6137-4895-9598-b035d9f24e09 logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:05 +0000 UTC Normal Pod demand-backup-mysql-0 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-mysql-0 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-gvts default-scheduler logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:05 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 1.397s (1.397s including waiting). Image size: 73406442 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:05 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:05 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:05 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:06 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 103ms (103ms including waiting). Image size: 73406442 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:06 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:06 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:09 +0000 UTC Normal Pod demand-backup-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-ff7d8f30-bbb0-42e4-aac9-9a0d9a668c24" attachdetach-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:11 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:14 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 2.999s (2.999s including waiting). Image size: 124972284 bytes. 
kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:14 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:14 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:18 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:34 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 15.434s (15.434s including waiting). Image size: 435034735 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:34 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:34 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:34 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:37 +0000 UTC Normal Pod demand-backup-orc-1 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-orc-1 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-hgfs default-scheduler logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:37 +0000 UTC Normal StatefulSet.apps demand-backup-orc SuccessfulCreate create Pod demand-backup-orc-1 in StatefulSet demand-backup-orc successful statefulset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:38 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:40 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 2.772s (2.772s including waiting). Image size: 124972284 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:40 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:40 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:45 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:46 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 1.353s (1.353s including waiting). Image size: 73406442 bytes. 
kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:46 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:46 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:46 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:46 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 110ms (110ms including waiting). Image size: 73406442 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:46 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:46 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:51 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 17.209s (17.209s including waiting). Image size: 451023996 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:51 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:51 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:51 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:55 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 3.577s (3.577s including waiting). Image size: 138673481 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:55 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:26:55 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:07 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:07 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:07 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-fancy-weevil/datadir-demand-backup-mysql-1" pd.csi.storage.gke.io_gke-9eb0b059f9f542b7b564-fff4-87fa-vm_80fdaf8e-6137-4895-9598-b035d9f24e09 logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:07 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulCreate create Claim datadir-demand-backup-mysql-1 Pod demand-backup-mysql-1 in StatefulSet demand-backup-mysql success statefulset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:07 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulCreate create Pod demand-backup-mysql-1 in StatefulSet demand-backup-mysql successful statefulset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:10 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-2dcc229a-d28c-4ca3-93bc-234260129f1a pd.csi.storage.gke.io_gke-9eb0b059f9f542b7b564-fff4-87fa-vm_80fdaf8e-6137-4895-9598-b035d9f24e09 logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:11 +0000 UTC Normal Pod demand-backup-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-haproxy-0 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-tr00 default-scheduler logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:11 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:11 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 132ms (132ms including waiting). Image size: 124972284 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:11 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:11 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:11 +0000 UTC Normal PodDisruptionBudget.policy demand-backup-haproxy NoPods No matching pods found controllermanager logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:11 +0000 UTC Normal StatefulSet.apps demand-backup-haproxy SuccessfulCreate create Pod demand-backup-haproxy-0 in StatefulSet demand-backup-haproxy successful statefulset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:11 +0000 UTC Normal Pod demand-backup-mysql-1 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-mysql-1 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-hgfs default-scheduler logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:13 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:15 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 2.126s (2.126s including waiting). 
Image size: 103552638 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:15 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:15 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:15 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:15 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 101ms (101ms including waiting). Image size: 103552638 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:15 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:15 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:18 +0000 UTC Normal Pod demand-backup-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-2dcc229a-d28c-4ca3-93bc-234260129f1a" attachdetach-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:19 +0000 UTC Normal Pod demand-backup-orc-2 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-orc-2 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-gvts default-scheduler logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:19 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:19 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 118ms (118ms including waiting). Image size: 124972284 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:19 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:19 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:19 +0000 UTC Normal StatefulSet.apps demand-backup-orc SuccessfulCreate create Pod demand-backup-orc-2 in StatefulSet demand-backup-orc successful statefulset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:20 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:20 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 130ms (130ms including waiting). Image size: 124972284 bytes. 
kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:20 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:20 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:23 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:24 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:25 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 1.377s (1.377s including waiting). Image size: 73406442 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:25 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:25 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:25 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:25 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 93ms (93ms including waiting). Image size: 73406442 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:25 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:25 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:32 +0000 UTC Warning Pod demand-backup-haproxy-0.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:39 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 15.789s (15.789s including waiting). Image size: 435034735 bytes. 
kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:39 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:39 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:39 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:55 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 16.744s (16.744s including waiting). Image size: 451023996 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:55 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:56 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:27:56 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:28:00 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 4.407s (4.407s including waiting). Image size: 138673481 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:28:00 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:28:00 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:28:12 +0000 UTC Warning Pod demand-backup-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2026/05/05 15:28:10 Waiting for MySQL ready state 2026/05/05 15:28:10 MySQL is ready 2026/05/05 15:28:10 Peers: [3133653162313236.demand-backup-mysql-unready.kuttl-test-fancy-weevil 3835373930336564.demand-backup-mysql-unready.kuttl-test-fancy-weevil] 2026/05/05 15:28:10 FQDN: demand-backup-mysql-1.demand-backup-mysql.kuttl-test-fancy-weevil 2026/05/05 15:28:10 Primary: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-fancy-weevil Replicas: [demand-backup-mysql-1.demand-backup-mysql.kuttl-test-fancy-weevil] 2026/05/05 15:28:10 lookup demand-backup-mysql-1 [10.202.233.10] 2026/05/05 15:28:10 PodIP: 10.202.233.10 2026/05/05 15:28:10 lookup demand-backup-mysql-0.demand-backup-mysql.kuttl-test-fancy-weevil [10.202.234.5] 2026/05/05 15:28:10 PrimaryIP: 10.202.234.5 2026/05/05 15:28:10 Donor: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-fancy-weevil 2026/05/05 15:28:11 Opening connection to 10.202.233.10 2026/05/05 15:28:11 Clone required: true 2026/05/05 15:28:11 Checking if a clone in progress 2026/05/05 15:28:11 Clone in progress: false 2026/05/05 15:28:11 Cloning from demand-backup-mysql-0.demand-backup-mysql.kuttl-test-fancy-weevil 2026/05/05 15:28:12 Clone finished. Restarting container... 
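(This startup-probe "failure", and the identical one for demand-backup-mysql-2 further down, is expected behavior rather than a crash: the bootstrap logic decides "Clone required: true", copies the datadir from the donor with the MySQL 8.0 CLONE plugin, and then lets the probe fail on purpose so the kubelet restarts the container on the freshly cloned data. If a clone ever looks stuck, its progress can be read from performance_schema; a sketch, assuming root credentials live in a secret named test-secrets under the key root — both names are assumptions, not taken from this log:

    # Hypothetical secret name/key; substitute whatever this cluster actually uses.
    $ PASS=$(kubectl -n kuttl-test-fancy-weevil get secret test-secrets \
        -o jsonpath='{.data.root}' | base64 -d)
    # clone_status reports one row per clone attempt, including failed ones.
    $ kubectl -n kuttl-test-fancy-weevil exec demand-backup-mysql-1 -c mysql -- \
        mysql -uroot -p"$PASS" -e \
        "SELECT STATE, ERROR_NO, ERROR_MESSAGE FROM performance_schema.clone_status;"
)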
kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:28:12 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:28:18 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 117ms (117ms including waiting). Image size: 435034735 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:28:41 +0000 UTC Warning Pod demand-backup-haproxy-0.spec.containers{haproxy} Unhealthy Liveness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:28:50 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:28:50 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulCreate create Claim datadir-demand-backup-mysql-2 Pod demand-backup-mysql-2 in StatefulSet demand-backup-mysql success statefulset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:28:50 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulCreate create Pod demand-backup-mysql-2 in StatefulSet demand-backup-mysql successful statefulset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:28:51 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:28:51 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-fancy-weevil/datadir-demand-backup-mysql-2" pd.csi.storage.gke.io_gke-9eb0b059f9f542b7b564-fff4-87fa-vm_80fdaf8e-6137-4895-9598-b035d9f24e09 logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:28:54 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-8a9abb0c-17b3-4953-a5b1-9511bf789c1b pd.csi.storage.gke.io_gke-9eb0b059f9f542b7b564-fff4-87fa-vm_80fdaf8e-6137-4895-9598-b035d9f24e09 logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:28:54 +0000 UTC Normal Pod demand-backup-mysql-2 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-mysql-2 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-tr00 default-scheduler logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:28:58 +0000 UTC Normal Pod demand-backup-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-haproxy-1 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-gvts default-scheduler logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:28:58 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:28:58 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 137ms (137ms including waiting). Image size: 124972284 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:28:58 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:28:58 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:28:58 +0000 UTC Normal StatefulSet.apps demand-backup-haproxy SuccessfulCreate create Pod demand-backup-haproxy-1 in StatefulSet demand-backup-haproxy successful statefulset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:01 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:02 +0000 UTC Normal Pod demand-backup-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-8a9abb0c-17b3-4953-a5b1-9511bf789c1b" attachdetach-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:03 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 2.166s (2.166s including waiting). Image size: 103552638 bytes. 
kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:03 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:03 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:03 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:03 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 104ms (104ms including waiting). Image size: 103552638 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:03 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:03 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:03 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:04 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 236ms (236ms including waiting). Image size: 124972284 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:04 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:04 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:05 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:05 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 102ms (102ms including waiting). Image size: 435034735 bytes. 
kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:05 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:05 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:05 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:20 +0000 UTC Normal Pod demand-backup-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-haproxy-2 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-hgfs default-scheduler logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:20 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:20 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 142ms (142ms including waiting). Image size: 124972284 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:20 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:20 +0000 UTC Normal StatefulSet.apps demand-backup-haproxy SuccessfulCreate create Pod demand-backup-haproxy-2 in StatefulSet demand-backup-haproxy successful statefulset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:20 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 14.689s (14.689s including waiting). Image size: 451023996 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:20 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:20 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:20 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:21 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:23 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:25 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 2.043s (2.043s including waiting). Image size: 103552638 bytes. 
kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:25 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:25 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:25 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:25 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 102ms (102ms including waiting). Image size: 103552638 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:25 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:25 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:27 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 6.386s (6.386s including waiting). Image size: 138673481 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:27 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:27 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:34 +0000 UTC Warning Pod demand-backup-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2026/05/05 15:29:33 Waiting for MySQL ready state 2026/05/05 15:29:33 MySQL is ready 2026/05/05 15:29:33 Peers: [3133653162313236.demand-backup-mysql-unready.kuttl-test-fancy-weevil 3465366261363934.demand-backup-mysql-unready.kuttl-test-fancy-weevil 3835373930336564.demand-backup-mysql-unready.kuttl-test-fancy-weevil] 2026/05/05 15:29:33 FQDN: demand-backup-mysql-2.demand-backup-mysql.kuttl-test-fancy-weevil 2026/05/05 15:29:33 Primary: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-fancy-weevil Replicas: [demand-backup-mysql-1.demand-backup-mysql.kuttl-test-fancy-weevil demand-backup-mysql-2.demand-backup-mysql.kuttl-test-fancy-weevil] 2026/05/05 15:29:33 lookup demand-backup-mysql-2 [10.202.232.9] 2026/05/05 15:29:33 PodIP: 10.202.232.9 2026/05/05 15:29:33 lookup demand-backup-mysql-0.demand-backup-mysql.kuttl-test-fancy-weevil [10.202.234.5] 2026/05/05 15:29:33 PrimaryIP: 10.202.234.5 2026/05/05 15:29:33 Donor: demand-backup-mysql-1.demand-backup-mysql.kuttl-test-fancy-weevil 2026/05/05 15:29:33 Opening connection to 10.202.232.9 2026/05/05 15:29:33 Clone required: true 2026/05/05 15:29:33 Checking if a clone in progress 2026/05/05 15:29:33 Clone in progress: false 2026/05/05 15:29:33 Cloning from demand-backup-mysql-1.demand-backup-mysql.kuttl-test-fancy-weevil 2026/05/05 15:29:34 Clone finished. Restarting container... 
kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:34 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:29:43 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 105ms (105ms including waiting). Image size: 435034735 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:15 +0000 UTC Normal PerconaServerMySQL.ps.percona.com demand-backup ClusterStateChanged Initializing -> Ready ps-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:25 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:25 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:25 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:27 +0000 UTC Warning Pod demand-backup-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 15:30:27 MySQL state is not ready... kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:28 +0000 UTC Normal PerconaServerMySQL.ps.percona.com demand-backup ClusterStateChanged Ready -> Initializing ps-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:32 +0000 UTC Warning Pod demand-backup-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 15:30:32 MySQL state is not ready... 
kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:33 +0000 UTC Warning Pod demand-backup-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 15:30:33 readiness check failed: replication is stopped kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:34 +0000 UTC Warning Pod demand-backup-haproxy-1.spec.containers{haproxy} Unhealthy Readiness probe errored: command timed out: "/opt/percona/haproxy_readiness_check.sh" timed out after 1s kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:35 +0000 UTC Warning Pod demand-backup-mysql-1.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 15:30:35 readiness check failed: replication is stopped kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:36 +0000 UTC Warning Pod demand-backup-mysql-0.spec.containers{mysql} Unhealthy Readiness probe errored: rpc error: code = Unknown desc = failed to exec in container: container is in CONTAINER_EXITED state kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:37 +0000 UTC Warning Pod demand-backup-haproxy-2.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:45 +0000 UTC Normal Pod demand-backup-mysql-0 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-mysql-0 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-gvts default-scheduler logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:46 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:46 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 122ms (122ms including waiting). Image size: 124972284 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:46 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:46 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:49 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:49 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 108ms (108ms including waiting). Image size: 435034735 bytes. 
kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:49 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:49 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:49 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:49 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 108ms (108ms including waiting). Image size: 451023996 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:49 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:49 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:49 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:49 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 112ms (112ms including waiting). Image size: 138673481 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:49 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:30:49 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:31:07 +0000 UTC Warning Pod demand-backup-mysql-0.spec.containers{mysql} Unhealthy Startup probe failed: 2026/05/05 15:31:06 Waiting for MySQL ready state 2026/05/05 15:31:06 MySQL is ready 2026/05/05 15:31:06 Peers: [3465366261363934.demand-backup-mysql-unready.kuttl-test-fancy-weevil 3835373930336564.demand-backup-mysql-unready.kuttl-test-fancy-weevil 6662643139353562.demand-backup-mysql-unready.kuttl-test-fancy-weevil] 2026/05/05 15:31:06 FQDN: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-fancy-weevil 2026/05/05 15:31:06 Primary: demand-backup-mysql-1.demand-backup-mysql.kuttl-test-fancy-weevil Replicas: [demand-backup-mysql-0.demand-backup-mysql.kuttl-test-fancy-weevil demand-backup-mysql-2.demand-backup-mysql.kuttl-test-fancy-weevil] 2026/05/05 15:31:06 lookup demand-backup-mysql-0 [10.202.234.8] 2026/05/05 15:31:06 PodIP: 10.202.234.8 2026/05/05 15:31:06 lookup demand-backup-mysql-1.demand-backup-mysql.kuttl-test-fancy-weevil [10.202.233.10] 2026/05/05 15:31:06 PrimaryIP: 10.202.233.10 2026/05/05 15:31:06 Donor: demand-backup-mysql-2.demand-backup-mysql.kuttl-test-fancy-weevil 2026/05/05 15:31:06 Opening connection to 10.202.234.8 2026/05/05 15:31:06 Clone required: true 2026/05/05 15:31:06 Checking if a clone in progress 2026/05/05 15:31:06 Clone in progress: false 2026/05/05 15:31:06 Cloning from demand-backup-mysql-2.demand-backup-mysql.kuttl-test-fancy-weevil 
2026/05/05 15:31:07 Clone finished. Restarting container... kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:31:07 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:31:11 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 106ms (106ms including waiting). Image size: 435034735 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:01 +0000 UTC Normal Pod xb-demand-backup-minio-72skg Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/xb-demand-backup-minio-72skg to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-tr00 default-scheduler logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:01 +0000 UTC Normal Job.batch xb-demand-backup-minio SuccessfulCreate Created pod: xb-demand-backup-minio-72skg job-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:02 +0000 UTC Normal Pod xb-demand-backup-minio-72skg.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:02 +0000 UTC Normal Pod xb-demand-backup-minio-72skg.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 135ms (135ms including waiting). Image size: 124972284 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:02 +0000 UTC Normal Pod xb-demand-backup-minio-72skg.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:02 +0000 UTC Normal Pod xb-demand-backup-minio-72skg.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:04 +0000 UTC Normal Pod xb-demand-backup-minio-72skg.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:04 +0000 UTC Normal Pod xb-demand-backup-minio-72skg.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 108ms (108ms including waiting). Image size: 451023996 bytes. 
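(The xb-demand-backup-minio job above is created by the operator to service an on-demand backup; the job name is the backup name with an xb- prefix, so the underlying PerconaServerMySQLBackup resource is presumably named demand-backup-minio — inferred from the job name, not stated in this log. The custom resource is usually the easier thing to watch:

    # The backup CR carries the overall state; the job/pod only shows pod-level detail.
    $ kubectl -n kuttl-test-fancy-weevil get perconaservermysqlbackup
    $ kubectl -n kuttl-test-fancy-weevil describe perconaservermysqlbackup demand-backup-minio
    # Raw xtrabackup output, useful when a backup fails mid-stream.
    $ kubectl -n kuttl-test-fancy-weevil logs job/xb-demand-backup-minio -c xtrabackup
)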
kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:04 +0000 UTC Normal Pod xb-demand-backup-minio-72skg.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:04 +0000 UTC Normal Pod xb-demand-backup-minio-72skg.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:10 +0000 UTC Normal Job.batch xb-demand-backup-minio Completed Job completed job-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:31 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:31 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:31 +0000 UTC Normal StatefulSet.apps demand-backup-haproxy SuccessfulDelete delete Pod demand-backup-haproxy-2 in StatefulSet demand-backup-haproxy successful statefulset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:31 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:31 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:31 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:31 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulDelete delete Pod demand-backup-mysql-2 in StatefulSet demand-backup-mysql successful statefulset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:31 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:31 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:31 +0000 UTC Normal StatefulSet.apps demand-backup-orc SuccessfulDelete delete Pod demand-backup-orc-2 in StatefulSet demand-backup-orc successful statefulset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:31 +0000 UTC Normal PerconaServerMySQL.ps.percona.com demand-backup ClusterStateChanged Ready -> Stopping ps-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:32 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:32 +0000 UTC Normal StatefulSet.apps demand-backup-haproxy SuccessfulDelete delete Pod demand-backup-haproxy-1 in StatefulSet demand-backup-haproxy successful statefulset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:32 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:32 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:32 +0000 UTC Normal StatefulSet.apps demand-backup-orc SuccessfulDelete 
delete Pod demand-backup-orc-1 in StatefulSet demand-backup-orc successful statefulset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:33 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:33 +0000 UTC Warning Pod demand-backup-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 15:32:33 MySQL state is not ready... kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:34 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:34 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:34 +0000 UTC Normal StatefulSet.apps demand-backup-haproxy SuccessfulDelete delete Pod demand-backup-haproxy-0 in StatefulSet demand-backup-haproxy successful statefulset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:34 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:34 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:34 +0000 UTC Normal StatefulSet.apps demand-backup-orc SuccessfulDelete delete Pod demand-backup-orc-0 in StatefulSet demand-backup-orc successful statefulset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:35 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:35 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:35 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:35 +0000 UTC Warning Pod demand-backup-mysql-1.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 15:32:35 MySQL state is not ready... kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:35 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulDelete delete Pod demand-backup-mysql-1 in StatefulSet demand-backup-mysql successful statefulset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:40 +0000 UTC Warning Pod demand-backup-mysql-1.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 15:32:40 MySQL state is not ready... kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:45 +0000 UTC Warning Pod demand-backup-mysql-1.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 15:32:45 MySQL state is not ready... 
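(The "replication is stopped" and "MySQL state is not ready" readiness failures in this stretch are expected noise: the cluster is being shut down (Ready -> Stopping above) so the restore can run against the detached volume, and replicas report stopped replication while their source goes away. Outside a planned stop, the same probe message is worth checking against actual replica state; a sketch reusing the $PASS variable from the clone example above:

    # Replica_IO_Running and Replica_SQL_Running should both be "Yes" on a healthy replica.
    $ kubectl -n kuttl-test-fancy-weevil exec demand-backup-mysql-1 -c mysql -- \
        mysql -uroot -p"$PASS" -e "SHOW REPLICA STATUS\G" | \
        grep -E 'Replica_(IO|SQL)_Running|Last_(IO|SQL)_Error'
)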
kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:46 +0000 UTC Warning Pod demand-backup-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 15:32:46 readiness check failed: replication is stopped kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:50 +0000 UTC Warning Pod demand-backup-mysql-1.spec.containers{mysql} Unhealthy Readiness probe errored: rpc error: code = Unknown desc = failed to exec in container: container is in CONTAINER_EXITED state kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:51 +0000 UTC Warning Pod demand-backup-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 15:32:51 readiness check failed: replication is stopped kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:55 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:55 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulDelete delete Pod demand-backup-mysql-0 in StatefulSet demand-backup-mysql successful statefulset-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:56 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:56 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:56 +0000 UTC Warning Pod demand-backup-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 15:32:56 MySQL state is not ready... kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:32:57 +0000 UTC Normal PerconaServerMySQL.ps.percona.com demand-backup ClusterStateChanged Stopping -> Paused ps-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:01 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-5lrkc Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/xb-restore-restore-of-demand-backup-5lrkc to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-tr00 default-scheduler logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:01 +0000 UTC Warning Pod xb-restore-restore-of-demand-backup-5lrkc FailedAttachVolume Multi-Attach error for volume "pvc-ff7d8f30-bbb0-42e4-aac9-9a0d9a668c24" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:01 +0000 UTC Normal Job.batch xb-restore-restore-of-demand-backup SuccessfulCreate Created pod: xb-restore-restore-of-demand-backup-5lrkc job-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:20 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-5lrkc SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-ff7d8f30-bbb0-42e4-aac9-9a0d9a668c24" attachdetach-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:22 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-5lrkc.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:22 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-5lrkc.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 199ms (199ms including waiting). 
Image size: 124972284 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:22 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-5lrkc.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:22 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-5lrkc.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:24 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-5lrkc.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:24 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-5lrkc.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 96ms (96ms including waiting). Image size: 451023996 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:24 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-5lrkc.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:24 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-5lrkc.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:31 +0000 UTC Normal Job.batch xb-restore-restore-of-demand-backup Completed Job completed job-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:32 +0000 UTC Normal Pod demand-backup-mysql-0 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-mysql-0 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-gvts default-scheduler logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:32 +0000 UTC Warning Pod demand-backup-mysql-0 FailedAttachVolume Multi-Attach error for volume "pvc-ff7d8f30-bbb0-42e4-aac9-9a0d9a668c24" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:32 +0000 UTC Normal Pod demand-backup-orc-0 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-orc-0 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-tr00 default-scheduler logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:32 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:32 +0000 UTC Normal PerconaServerMySQL.ps.percona.com demand-backup ClusterStateChanged Paused -> Initializing ps-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:33 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 138ms (138ms including waiting). Image size: 124972284 bytes. 
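(Both Multi-Attach errors above — first for the restore pod, then for the rescheduled demand-backup-mysql-0 — are benign with ReadWriteOnce GCE PD volumes: the disk can be attached to only one node at a time, so the attachdetach-controller must wait for the previous node to release it; for the restore pod above that took about 19 seconds (15:33:01 to 15:33:20). If such a wait ever fails to resolve, the current holder is visible on the cluster-scoped VolumeAttachment objects:

    # Find which node still holds the PV; VolumeAttachment is cluster-scoped.
    $ kubectl get volumeattachment | grep pvc-ff7d8f30-bbb0-42e4-aac9-9a0d9a668c24
    $ kubectl describe volumeattachment <name-from-the-previous-command>
)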
kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:33 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:33 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:35 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:35 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 423ms (423ms including waiting). Image size: 73406442 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:35 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:35 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:35 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:35 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 130ms (130ms including waiting). Image size: 73406442 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:35 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:35 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:50 +0000 UTC Normal Pod demand-backup-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-ff7d8f30-bbb0-42e4-aac9-9a0d9a668c24" attachdetach-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:51 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:51 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 143ms (143ms including waiting). Image size: 124972284 bytes. 
kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:51 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:51 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:53 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:53 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 104ms (104ms including waiting). Image size: 435034735 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:53 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:53 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:53 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:53 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 111ms (111ms including waiting). Image size: 451023996 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:53 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:54 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:54 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:54 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 109ms (109ms including waiting). Image size: 138673481 bytes. 
kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:54 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:33:54 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:07 +0000 UTC Normal Pod demand-backup-orc-1 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-orc-1 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-hgfs default-scheduler
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:07 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:08 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 113ms (113ms including waiting). Image size: 124972284 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:08 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:08 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:09 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:10 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 132ms (132ms including waiting). Image size: 73406442 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:10 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:10 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:10 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:10 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 121ms (121ms including waiting). Image size: 73406442 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:10 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:10 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:26 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:26 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:27 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-fancy-weevil/datadir-demand-backup-mysql-1" pd.csi.storage.gke.io_gke-9eb0b059f9f542b7b564-fff4-87fa-vm_80fdaf8e-6137-4895-9598-b035d9f24e09
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:30 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-3892a4a4-573c-44bc-be50-3638f9660b93 pd.csi.storage.gke.io_gke-9eb0b059f9f542b7b564-fff4-87fa-vm_80fdaf8e-6137-4895-9598-b035d9f24e09
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:30 +0000 UTC Normal Pod demand-backup-mysql-1 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-mysql-1 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-hgfs default-scheduler
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:31 +0000 UTC Normal Pod demand-backup-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-haproxy-0 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-tr00 default-scheduler
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:31 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:31 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 119ms (119ms including waiting). Image size: 124972284 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:31 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:31 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:34 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:34 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 420ms (420ms including waiting). Image size: 103552638 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:34 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:34 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:34 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:35 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 105ms (105ms including waiting). Image size: 103552638 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:35 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:35 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:38 +0000 UTC Normal Pod demand-backup-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-3892a4a4-573c-44bc-be50-3638f9660b93" attachdetach-controller
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:39 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:39 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 126ms (126ms including waiting). Image size: 124972284 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:39 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:39 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:41 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:42 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 111ms (111ms including waiting). Image size: 435034735 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:42 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:42 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:42 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:42 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 422ms (422ms including waiting). Image size: 451023996 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:42 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:42 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:42 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:42 +0000 UTC Normal Pod demand-backup-orc-2 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-orc-2 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-gvts default-scheduler
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:42 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:43 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 343ms (343ms including waiting). Image size: 138673481 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:43 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:43 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:43 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 143ms (143ms including waiting). Image size: 124972284 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:43 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:43 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:44 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:44 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 130ms (131ms including waiting). Image size: 73406442 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:44 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:44 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:44 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:45 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 114ms (114ms including waiting). Image size: 73406442 bytes. kubelet
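Note: every pod in this cluster follows the same startup pattern visible above: an init container built from the PR image under test (orchestrator-init, haproxy-init, mysql-init) runs first, apparently staging the operator's helper binaries, and only then do the main container and its mysql-monit sidecar start. To replay the event stream for a single pod instead of reading the interleaved dump, something like the following works (standard kubectl, nothing test-specific):
# Events for one pod in this run's namespace, oldest first:
kubectl -n kuttl-test-fancy-weevil get events \
  --field-selector involvedObject.kind=Pod,involvedObject.name=demand-backup-orc-1 \
  --sort-by=.lastTimestamp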
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:45 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:45 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:34:51 +0000 UTC Warning Pod demand-backup-haproxy-0.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:00 +0000 UTC Warning Pod demand-backup-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2026/05/05 15:34:59 Waiting for MySQL ready state 2026/05/05 15:34:59 MySQL is ready 2026/05/05 15:34:59 Peers: [3639303236386132.demand-backup-mysql-unready.kuttl-test-fancy-weevil 3861346662636536.demand-backup-mysql-unready.kuttl-test-fancy-weevil] 2026/05/05 15:34:59 FQDN: demand-backup-mysql-1.demand-backup-mysql.kuttl-test-fancy-weevil 2026/05/05 15:34:59 Primary: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-fancy-weevil Replicas: [demand-backup-mysql-1.demand-backup-mysql.kuttl-test-fancy-weevil] 2026/05/05 15:34:59 lookup demand-backup-mysql-1 [10.202.233.13] 2026/05/05 15:34:59 PodIP: 10.202.233.13 2026/05/05 15:34:59 lookup demand-backup-mysql-0.demand-backup-mysql.kuttl-test-fancy-weevil [10.202.234.9] 2026/05/05 15:34:59 PrimaryIP: 10.202.234.9 2026/05/05 15:34:59 Donor: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-fancy-weevil 2026/05/05 15:34:59 Opening connection to 10.202.233.13 2026/05/05 15:34:59 Clone required: true 2026/05/05 15:34:59 Checking if a clone in progress 2026/05/05 15:34:59 Clone in progress: false 2026/05/05 15:34:59 Cloning from demand-backup-mysql-0.demand-backup-mysql.kuttl-test-fancy-weevil 2026/05/05 15:35:00 Clone finished. Restarting container... kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:00 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:03 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 101ms (101ms including waiting). Image size: 435034735 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:35 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:35 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
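Note: the "Startup probe failed" / "Killing" pair above for demand-backup-mysql-1 is the expected join path, not a crash. The probe script detects that the empty replica needs a dataset ("Clone required: true"), clones from the donor demand-backup-mysql-0 with MySQL 8.0's CLONE plugin, and then deliberately lets the kubelet restart the container into the freshly cloned data directory ("Clone finished. Restarting container..."). A clone in flight can be watched from SQL; a minimal sketch, assuming the root password lives in a secret named test-secrets under the key root (that secret name is a guess, check the test suite):
# Fetch the root password (secret name is an assumption) and query clone state:
ROOT_PASS=$(kubectl -n kuttl-test-fancy-weevil get secret test-secrets \
  -o jsonpath='{.data.root}' | base64 -d)
kubectl -n kuttl-test-fancy-weevil exec demand-backup-mysql-1 -c mysql -- \
  mysql -uroot -p"$ROOT_PASS" \
  -e "SELECT STATE, SOURCE, ERROR_NO FROM performance_schema.clone_status\G"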
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:35 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-fancy-weevil/datadir-demand-backup-mysql-2" pd.csi.storage.gke.io_gke-9eb0b059f9f542b7b564-fff4-87fa-vm_80fdaf8e-6137-4895-9598-b035d9f24e09
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:38 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-4d7e0743-56d5-4b69-b6ab-e09d08e4ab21 pd.csi.storage.gke.io_gke-9eb0b059f9f542b7b564-fff4-87fa-vm_80fdaf8e-6137-4895-9598-b035d9f24e09
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:39 +0000 UTC Normal Pod demand-backup-mysql-2 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-mysql-2 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-tr00 default-scheduler
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:41 +0000 UTC Normal Pod demand-backup-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-haproxy-1 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-gvts default-scheduler
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:42 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:42 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 160ms (160ms including waiting). Image size: 124972284 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:42 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:42 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:44 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:44 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 112ms (112ms including waiting). Image size: 103552638 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:44 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:45 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:45 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:45 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 98ms (98ms including waiting). Image size: 103552638 bytes. kubelet
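Note: the PersistentVolumeClaim chain above (WaitForFirstConsumer -> ExternalProvisioning -> Provisioning -> ProvisioningSucceeded) is normal delayed binding: the claim stays pending until its pod is scheduled, so the GCE persistent disk is created in the same zone as the chosen node, and the 'pd.csi.storage.gke.io' provisioner then cuts the disk. The same lifecycle can be checked on a live claim with:
# Claim status and its event history:
kubectl -n kuttl-test-fancy-weevil get pvc datadir-demand-backup-mysql-2 -o wide
kubectl -n kuttl-test-fancy-weevil describe pvc datadir-demand-backup-mysql-2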
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:45 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:45 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:46 +0000 UTC Normal Pod demand-backup-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-4d7e0743-56d5-4b69-b6ab-e09d08e4ab21" attachdetach-controller
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:48 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:48 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 142ms (142ms including waiting). Image size: 124972284 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:48 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:48 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:50 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:50 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 114ms (114ms including waiting). Image size: 435034735 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:50 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:50 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:50 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:50 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 107ms (107ms including waiting). Image size: 451023996 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:50 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:50 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:50 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:51 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 171ms (171ms including waiting). Image size: 138673481 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:51 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:35:51 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:36:02 +0000 UTC Normal Pod demand-backup-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-haproxy-2 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-hgfs default-scheduler
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:36:02 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:36:02 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 134ms (134ms including waiting). Image size: 124972284 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:36:02 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:36:02 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:36:05 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:36:05 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 150ms (150ms including waiting). Image size: 103552638 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:36:05 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:36:05 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:36:05 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:36:05 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 102ms (102ms including waiting). Image size: 103552638 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:36:05 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:36:05 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:36:08 +0000 UTC Warning Pod demand-backup-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2026/05/05 15:36:08 Waiting for MySQL ready state 2026/05/05 15:36:08 MySQL is ready 2026/05/05 15:36:08 Peers: [3639303236386132.demand-backup-mysql-unready.kuttl-test-fancy-weevil 3861346662636536.demand-backup-mysql-unready.kuttl-test-fancy-weevil 6530313036356361.demand-backup-mysql-unready.kuttl-test-fancy-weevil] 2026/05/05 15:36:08 FQDN: demand-backup-mysql-2.demand-backup-mysql.kuttl-test-fancy-weevil 2026/05/05 15:36:08 Primary: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-fancy-weevil Replicas: [demand-backup-mysql-1.demand-backup-mysql.kuttl-test-fancy-weevil demand-backup-mysql-2.demand-backup-mysql.kuttl-test-fancy-weevil] 2026/05/05 15:36:08 lookup demand-backup-mysql-2 [10.202.232.14] 2026/05/05 15:36:08 PodIP: 10.202.232.14 2026/05/05 15:36:08 lookup demand-backup-mysql-0.demand-backup-mysql.kuttl-test-fancy-weevil [10.202.234.9] 2026/05/05 15:36:08 PrimaryIP: 10.202.234.9 2026/05/05 15:36:08 Donor: demand-backup-mysql-1.demand-backup-mysql.kuttl-test-fancy-weevil 2026/05/05 15:36:08 Opening connection to 10.202.232.14 2026/05/05 15:36:08 Clone required: true 2026/05/05 15:36:08 Checking if a clone in progress 2026/05/05 15:36:08 Clone in progress: false 2026/05/05 15:36:08 Cloning from demand-backup-mysql-1.demand-backup-mysql.kuttl-test-fancy-weevil 2026/05/05 15:36:08 Clone finished. Restarting container... kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:36:08 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:36:12 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 92ms (92ms including waiting). Image size: 435034735 bytes. kubelet
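Note: compare the two clone logs: mysql-1 cloned from mysql-0, while mysql-2 reports "Donor: demand-backup-mysql-1", i.e. the bootstrap picks an already-provisioned replica as the donor rather than always hitting the primary, so clone traffic cascades down the chain. Once the restarted container is up, replication health can be confirmed with MySQL 8.0.22+ syntax (password retrieval as in the earlier sketch):
# Replication threads and lag on the freshly joined replica:
kubectl -n kuttl-test-fancy-weevil exec demand-backup-mysql-2 -c mysql -- \
  mysql -uroot -p"$ROOT_PASS" -e "SHOW REPLICA STATUS\G" | \
  grep -E 'Replica_IO_Running|Replica_SQL_Running|Seconds_Behind_Source'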
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:22 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:23 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:23 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:23 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:23 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:23 +0000 UTC Warning Pod demand-backup-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 15:37:23 MySQL state is not ready... kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:23 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:23 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:24 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:24 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:25 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:25 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:26 +0000 UTC Warning Pod demand-backup-orc-2.spec.containers{orchestrator} Unhealthy Readiness probe failed: Get "http://10.202.234.10:3000/api/health": dial tcp 10.202.234.10:3000: connect: connection refused kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:27 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:27 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:27 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:30 +0000 UTC Warning Pod demand-backup-mysql-1.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 15:37:30 MySQL state is not ready... kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:31 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:31 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:31 +0000 UTC Warning Pod demand-backup-orc-1.spec.containers{orchestrator} Unhealthy Readiness probe failed: Get "http://10.202.233.12:3000/api/health": dial tcp 10.202.233.12:3000: connect: connection refused kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:32 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:32 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:32 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:37:36 +0000 UTC Warning Pod demand-backup-mysql-0.spec.containers{mysql} Unhealthy Readiness probe errored: rpc error: code = Unknown desc = failed to exec in container: container is in CONTAINER_EXITED state kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:01 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:01 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:02 +0000 UTC Warning Pod demand-backup-orc-0.spec.containers{orchestrator} Unhealthy Readiness probe failed: Get "http://10.202.232.12:3000/api/health": dial tcp 10.202.232.12:3000: connect: connection refused kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:03 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-with-backup-source-gz2m6 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/xb-restore-restore-of-demand-backup-with-backup-source-gz2m6 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-tr00 default-scheduler
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:03 +0000 UTC Normal Job.batch xb-restore-restore-of-demand-backup-with-backup-source SuccessfulCreate Created pod: xb-restore-restore-of-demand-backup-with-backup-source-gz2m6 job-controller
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:10 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-with-backup-source-gz2m6 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-ff7d8f30-bbb0-42e4-aac9-9a0d9a668c24" attachdetach-controller
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:11 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-with-backup-source-gz2m6.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:11 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-with-backup-source-gz2m6.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 128ms (128ms including waiting). Image size: 124972284 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:11 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-with-backup-source-gz2m6.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:11 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-with-backup-source-gz2m6.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:13 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-with-backup-source-gz2m6.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:13 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-with-backup-source-gz2m6.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 125ms (125ms including waiting). Image size: 451023996 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:13 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-with-backup-source-gz2m6.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:13 +0000 UTC Normal Pod xb-restore-restore-of-demand-backup-with-backup-source-gz2m6.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:21 +0000 UTC Normal Pod demand-backup-mysql-0 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-mysql-0 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-gvts default-scheduler
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:21 +0000 UTC Warning Pod demand-backup-mysql-0 FailedAttachVolume Multi-Attach error for volume "pvc-ff7d8f30-bbb0-42e4-aac9-9a0d9a668c24" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:21 +0000 UTC Normal Job.batch xb-restore-restore-of-demand-backup-with-backup-source Completed Job completed job-controller
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:22 +0000 UTC Normal Pod demand-backup-orc-0 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-orc-0 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-tr00 default-scheduler
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:22 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:22 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 120ms (120ms including waiting). Image size: 124972284 bytes. kubelet
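Note: the FailedAttachVolume "Multi-Attach error" above is a benign ordering artifact of the restore: the datadir volume pvc-ff7d8f30-bbb0-42e4-aac9-9a0d9a668c24 is ReadWriteOnce, and the xb-restore job pod on node ...tr00 still held it when the recreated demand-backup-mysql-0 was scheduled to node ...gvts at 15:38:21; once the job pod's attachment was torn down, the attach succeeded at 15:38:35 below. Which node holds an RWO volume at any moment is visible from the cluster-scoped VolumeAttachment objects:
# Show the current attachment for the contested volume:
kubectl get volumeattachments -o wide | grep pvc-ff7d8f30-bbb0-42e4-aac9-9a0d9a668c24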
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:22 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:22 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:24 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:24 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 89ms (89ms including waiting). Image size: 73406442 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:24 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:24 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:25 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:25 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 111ms (111ms including waiting). Image size: 73406442 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:25 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:25 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:35 +0000 UTC Normal Pod demand-backup-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-ff7d8f30-bbb0-42e4-aac9-9a0d9a668c24" attachdetach-controller
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:37 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:37 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 153ms (153ms including waiting). Image size: 124972284 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:37 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:37 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 120ms (120ms including waiting). Image size: 435034735 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 106ms (106ms including waiting). Image size: 451023996 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 88ms (88ms including waiting). Image size: 138673481 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:57 +0000 UTC Normal Pod demand-backup-orc-1 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-orc-1 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-hgfs default-scheduler
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:57 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:57 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 131ms (131ms including waiting). Image size: 124972284 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:57 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:57 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:59 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:59 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 122ms (122ms including waiting). Image size: 73406442 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:59 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:59 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:59 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:59 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 133ms (133ms including waiting). Image size: 73406442 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:59 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:38:59 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:12 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:12 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:12 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-fancy-weevil/datadir-demand-backup-mysql-1" pd.csi.storage.gke.io_gke-9eb0b059f9f542b7b564-fff4-87fa-vm_80fdaf8e-6137-4895-9598-b035d9f24e09
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:15 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-648b6ab9-de57-4902-909f-eb8e4b1e22a1 pd.csi.storage.gke.io_gke-9eb0b059f9f542b7b564-fff4-87fa-vm_80fdaf8e-6137-4895-9598-b035d9f24e09
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:15 +0000 UTC Normal Pod demand-backup-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-haproxy-0 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-tr00 default-scheduler
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:15 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:15 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 129ms (129ms including waiting). Image size: 124972284 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:15 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:16 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:16 +0000 UTC Normal Pod demand-backup-mysql-1 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-mysql-1 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-hgfs default-scheduler
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:17 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:17 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 110ms (110ms including waiting). Image size: 103552638 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:18 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:18 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:18 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:18 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 108ms (109ms including waiting). Image size: 103552638 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:18 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:18 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:23 +0000 UTC Normal Pod demand-backup-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-648b6ab9-de57-4902-909f-eb8e4b1e22a1" attachdetach-controller
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:25 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:25 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 159ms (159ms including waiting). Image size: 124972284 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:25 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:25 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:27 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:27 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 131ms (131ms including waiting). Image size: 435034735 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:27 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:27 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:27 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:27 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 133ms (133ms including waiting). Image size: 451023996 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:27 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:27 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:27 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:28 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 126ms (126ms including waiting). Image size: 138673481 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:28 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:28 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:31 +0000 UTC Normal Pod demand-backup-orc-2 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-orc-2 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-gvts default-scheduler
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:32 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:32 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 161ms (161ms including waiting). Image size: 124972284 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:32 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:32 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:34 +0000 UTC Warning Pod demand-backup-haproxy-0.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:34 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:34 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 103ms (103ms including waiting). Image size: 73406442 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:34 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:34 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:34 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:34 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 110ms (110ms including waiting). Image size: 73406442 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:34 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:34 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:46 +0000 UTC Warning Pod demand-backup-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2026/05/05 15:39:45 Waiting for MySQL ready state 2026/05/05 15:39:45 MySQL is ready 2026/05/05 15:39:45 Peers: [3262373930343632.demand-backup-mysql-unready.kuttl-test-fancy-weevil 6561333238316533.demand-backup-mysql-unready.kuttl-test-fancy-weevil] 2026/05/05 15:39:45 FQDN: demand-backup-mysql-1.demand-backup-mysql.kuttl-test-fancy-weevil 2026/05/05 15:39:45 Primary: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-fancy-weevil Replicas: [demand-backup-mysql-1.demand-backup-mysql.kuttl-test-fancy-weevil] 2026/05/05 15:39:45 lookup demand-backup-mysql-1 [10.202.233.16] 2026/05/05 15:39:45 PodIP: 10.202.233.16 2026/05/05 15:39:45 lookup demand-backup-mysql-0.demand-backup-mysql.kuttl-test-fancy-weevil [10.202.234.12] 2026/05/05 15:39:45 PrimaryIP: 10.202.234.12 2026/05/05 15:39:45 Donor: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-fancy-weevil 2026/05/05 15:39:45 Opening connection to 10.202.233.16 2026/05/05 15:39:45 Clone required: true 2026/05/05 15:39:45 Checking if a clone in progress 2026/05/05 15:39:45 Clone in progress: false 2026/05/05 15:39:45 Cloning from demand-backup-mysql-0.demand-backup-mysql.kuttl-test-fancy-weevil 2026/05/05 15:39:46 Clone finished. Restarting container... kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:46 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:39:50 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 124ms (124ms including waiting). Image size: 435034735 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:20 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:20 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
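Note: after the restore the operator rebuilds the replicas from scratch: fresh datadir PVCs are provisioned for mysql-1 and mysql-2, each new pod re-runs the clone bootstrap against the restored primary (hence the repeated startup-probe "failures" above), and HAProxy briefly logs ERROR 2013 readiness failures while the primary is still initializing. A test step or runbook would typically just block on readiness; for example (the timeout is an arbitrary illustration):
# Wait for the rebuilt replicas to pass their readiness checks:
kubectl -n kuttl-test-fancy-weevil wait --for=condition=Ready \
  pod/demand-backup-mysql-1 pod/demand-backup-mysql-2 --timeout=10m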
persistentvolume-controller logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:20 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-fancy-weevil/datadir-demand-backup-mysql-2" pd.csi.storage.gke.io_gke-9eb0b059f9f542b7b564-fff4-87fa-vm_80fdaf8e-6137-4895-9598-b035d9f24e09 logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:24 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-0466bac4-ec28-4590-91f6-44e87719d44e pd.csi.storage.gke.io_gke-9eb0b059f9f542b7b564-fff4-87fa-vm_80fdaf8e-6137-4895-9598-b035d9f24e09 logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:24 +0000 UTC Normal Pod demand-backup-mysql-2 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-mysql-2 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-tr00 default-scheduler logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:30 +0000 UTC Normal Pod demand-backup-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-haproxy-1 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-gvts default-scheduler logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:30 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:30 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 132ms (132ms including waiting). Image size: 124972284 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:30 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:30 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:32 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:32 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 146ms (146ms including waiting). Image size: 103552638 bytes. kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:32 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:32 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:32 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:32 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 98ms (98ms including waiting). Image size: 103552638 bytes. 
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:32 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:32 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:32 +0000 UTC Normal Pod demand-backup-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-0466bac4-ec28-4590-91f6-44e87719d44e" attachdetach-controller
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:33 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:34 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 131ms (131ms including waiting). Image size: 124972284 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:34 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:34 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:36 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:36 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 118ms (118ms including waiting). Image size: 435034735 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:36 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:36 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:36 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:36 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 181ms (181ms including waiting). Image size: 451023996 bytes. kubelet
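
[editor's note] The PersistentVolumeClaim events above trace the usual delayed-binding flow: WaitForFirstConsumer holds the claim until the pod is scheduled, the GKE PD CSI driver then provisions the volume, and the attachdetach-controller attaches it to the chosen node. The delayed binding comes from the StorageClass's volumeBindingMode, which can be verified with (the class name standard-rwo is illustrative for GKE; use the PVC's actual storageClassName):

# Confirm the StorageClass uses pod-driven (delayed) volume binding.
kubectl get storageclass standard-rwo -o jsonpath='{.volumeBindingMode}{"\n"}'
# Expected for the flow above: WaitForFirstConsumer
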
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:36 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:36 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:36 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:36 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 114ms (114ms including waiting). Image size: 138673481 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:36 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:36 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:49 +0000 UTC Normal Pod demand-backup-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/demand-backup-haproxy-2 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-hgfs default-scheduler
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:50 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:50 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 155ms (155ms including waiting). Image size: 124972284 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:50 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:50 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:52 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:52 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 122ms (122ms including waiting). Image size: 103552638 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:52 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:52 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:52 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:52 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 139ms (139ms including waiting). Image size: 103552638 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:52 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:52 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:54 +0000 UTC Warning Pod demand-backup-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2026/05/05 15:40:53 Waiting for MySQL ready state
2026/05/05 15:40:53 MySQL is ready
2026/05/05 15:40:53 Peers: [3165316539393765.demand-backup-mysql-unready.kuttl-test-fancy-weevil 3262373930343632.demand-backup-mysql-unready.kuttl-test-fancy-weevil 6561333238316533.demand-backup-mysql-unready.kuttl-test-fancy-weevil]
2026/05/05 15:40:53 FQDN: demand-backup-mysql-2.demand-backup-mysql.kuttl-test-fancy-weevil
2026/05/05 15:40:53 Primary: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-fancy-weevil Replicas: [demand-backup-mysql-1.demand-backup-mysql.kuttl-test-fancy-weevil demand-backup-mysql-2.demand-backup-mysql.kuttl-test-fancy-weevil]
2026/05/05 15:40:53 lookup demand-backup-mysql-2 [10.202.232.18]
2026/05/05 15:40:53 PodIP: 10.202.232.18
2026/05/05 15:40:53 lookup demand-backup-mysql-0.demand-backup-mysql.kuttl-test-fancy-weevil [10.202.234.12]
2026/05/05 15:40:53 PrimaryIP: 10.202.234.12
2026/05/05 15:40:53 Donor: demand-backup-mysql-1.demand-backup-mysql.kuttl-test-fancy-weevil
2026/05/05 15:40:53 Opening connection to 10.202.232.18
2026/05/05 15:40:53 Clone required: true
2026/05/05 15:40:53 Checking if a clone in progress
2026/05/05 15:40:53 Clone in progress: false
2026/05/05 15:40:53 Cloning from demand-backup-mysql-1.demand-backup-mysql.kuttl-test-fancy-weevil
2026/05/05 15:40:54 Clone finished. Restarting container... kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:54 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:40:57 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 115ms (115ms including waiting). Image size: 435034735 bytes. kubelet
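
[editor's note] demand-backup-mysql-2 goes through the same bootstrap as mysql-1 above, this time cloning from demand-backup-mysql-1 rather than the primary. The probe's "Checking if a clone in progress" step corresponds to MySQL 8.0's clone instrumentation, which can also be inspected by hand (ROOT_PASSWORD is an assumed environment variable):

# Clone state as recorded by MySQL's clone plugin, the same signal the
# probe's "Clone in progress" check relies on.
kubectl -n kuttl-test-fancy-weevil exec demand-backup-mysql-2 -c mysql -- \
  mysql -uroot -p"$ROOT_PASSWORD" -Ne \
  "SELECT STATE, ERROR_NO FROM performance_schema.clone_status;"
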
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:41:47 +0000 UTC Normal Pod xb-demand-backup-fail-minio-jnrb4 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/xb-demand-backup-fail-minio-jnrb4 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-tr00 default-scheduler
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:41:47 +0000 UTC Normal Pod xb-demand-backup-fail-minio-jnrb4.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:41:47 +0000 UTC Normal Pod xb-demand-backup-fail-minio-jnrb4.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 113ms (113ms including waiting). Image size: 124972284 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:41:47 +0000 UTC Normal Pod xb-demand-backup-fail-minio-jnrb4.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:41:47 +0000 UTC Normal Pod xb-demand-backup-fail-minio-jnrb4.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:41:47 +0000 UTC Normal Job.batch xb-demand-backup-fail-minio SuccessfulCreate Created pod: xb-demand-backup-fail-minio-jnrb4 job-controller
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:41:50 +0000 UTC Normal Pod xb-demand-backup-fail-minio-jnrb4.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:41:50 +0000 UTC Normal Pod xb-demand-backup-fail-minio-jnrb4.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 102ms (102ms including waiting). Image size: 451023996 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:41:50 +0000 UTC Normal Pod xb-demand-backup-fail-minio-jnrb4.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:41:50 +0000 UTC Normal Pod xb-demand-backup-fail-minio-jnrb4.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:42:00 +0000 UTC Normal Pod xb-demand-backup-fail-minio-6sw7z Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/xb-demand-backup-fail-minio-6sw7z to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-tr00 default-scheduler
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:42:00 +0000 UTC Normal Pod xb-demand-backup-fail-minio-6sw7z.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:42:00 +0000 UTC Normal Pod xb-demand-backup-fail-minio-6sw7z.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 158ms (158ms including waiting). Image size: 124972284 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:42:00 +0000 UTC Normal Pod xb-demand-backup-fail-minio-6sw7z.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:42:00 +0000 UTC Normal Pod xb-demand-backup-fail-minio-6sw7z.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:42:00 +0000 UTC Normal Job.batch xb-demand-backup-fail-minio SuccessfulCreate Created pod: xb-demand-backup-fail-minio-6sw7z job-controller
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:42:03 +0000 UTC Normal Pod xb-demand-backup-fail-minio-6sw7z.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:42:03 +0000 UTC Normal Pod xb-demand-backup-fail-minio-6sw7z.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 115ms (115ms including waiting). Image size: 451023996 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:42:03 +0000 UTC Normal Pod xb-demand-backup-fail-minio-6sw7z.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:42:03 +0000 UTC Normal Pod xb-demand-backup-fail-minio-6sw7z.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:42:23 +0000 UTC Normal Pod xb-demand-backup-fail-minio-lf8pj Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/xb-demand-backup-fail-minio-lf8pj to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-tr00 default-scheduler
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:42:23 +0000 UTC Normal Pod xb-demand-backup-fail-minio-lf8pj.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:42:23 +0000 UTC Normal Pod xb-demand-backup-fail-minio-lf8pj.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 124ms (124ms including waiting). Image size: 124972284 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:42:23 +0000 UTC Normal Pod xb-demand-backup-fail-minio-lf8pj.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:42:23 +0000 UTC Normal Pod xb-demand-backup-fail-minio-lf8pj.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:42:23 +0000 UTC Normal Job.batch xb-demand-backup-fail-minio SuccessfulCreate Created pod: xb-demand-backup-fail-minio-lf8pj job-controller
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:42:26 +0000 UTC Normal Pod xb-demand-backup-fail-minio-lf8pj.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:42:26 +0000 UTC Normal Pod xb-demand-backup-fail-minio-lf8pj.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 95ms (95ms including waiting). Image size: 451023996 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:42:26 +0000 UTC Normal Pod xb-demand-backup-fail-minio-lf8pj.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:42:26 +0000 UTC Normal Pod xb-demand-backup-fail-minio-lf8pj.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:06 +0000 UTC Normal Pod xb-demand-backup-fail-minio-c2jsz Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/xb-demand-backup-fail-minio-c2jsz to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-tr00 default-scheduler
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:06 +0000 UTC Normal Pod xb-demand-backup-fail-minio-c2jsz.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:06 +0000 UTC Normal Pod xb-demand-backup-fail-minio-c2jsz.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1238-7677a7b6" in 122ms (122ms including waiting). Image size: 124972284 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:06 +0000 UTC Normal Pod xb-demand-backup-fail-minio-c2jsz.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:06 +0000 UTC Normal Pod xb-demand-backup-fail-minio-c2jsz.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:06 +0000 UTC Normal Job.batch xb-demand-backup-fail-minio SuccessfulCreate Created pod: xb-demand-backup-fail-minio-c2jsz job-controller
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:09 +0000 UTC Normal Pod xb-demand-backup-fail-minio-c2jsz.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:09 +0000 UTC Normal Pod xb-demand-backup-fail-minio-c2jsz.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 103ms (103ms including waiting). Image size: 451023996 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:09 +0000 UTC Normal Pod xb-demand-backup-fail-minio-c2jsz.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:09 +0000 UTC Normal Pod xb-demand-backup-fail-minio-c2jsz.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:12 +0000 UTC Warning Job.batch xb-demand-backup-fail-minio BackoffLimitExceeded Job has reached the specified backoff limit job-controller
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:46 +0000 UTC Normal Pod aws-cli-1777995826245276668 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/aws-cli-1777995826245276668 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-tr00 default-scheduler
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:47 +0000 UTC Normal Pod aws-cli-1777995826245276668.spec.containers{aws-cli-1777995826245276668} Pulling Pulling image "perconalab/awscli" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:47 +0000 UTC Normal Pod aws-cli-1777995826245276668.spec.containers{aws-cli-1777995826245276668} Pulled Successfully pulled image "perconalab/awscli" in 122ms (122ms including waiting). Image size: 30314917 bytes. kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:47 +0000 UTC Normal Pod aws-cli-1777995826245276668.spec.containers{aws-cli-1777995826245276668} Created Created container: aws-cli-1777995826245276668 kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:47 +0000 UTC Normal Pod aws-cli-1777995826245276668.spec.containers{aws-cli-1777995826245276668} Started Started container aws-cli-1777995826245276668 kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:53 +0000 UTC Normal Pod aws-cli-1777995832673950712 Binding Scheduled Successfully assigned kuttl-test-fancy-weevil/aws-cli-1777995832673950712 to gke-jen-ps-1238-7677a7b6-default-pool-abdafd6c-tr00 default-scheduler
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:53 +0000 UTC Normal Pod aws-cli-1777995832673950712.spec.containers{aws-cli-1777995832673950712} Pulling Pulling image "perconalab/awscli" kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:53 +0000 UTC Normal Pod aws-cli-1777995832673950712.spec.containers{aws-cli-1777995832673950712} Pulled Successfully pulled image "perconalab/awscli" in 102ms (102ms including waiting). Image size: 30314917 bytes. kubelet
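
[editor's note] The xb-demand-backup-fail-minio Job is this test's intentional failure case: the job-controller keeps creating replacement pods (jnrb4, 6sw7z, lf8pj, c2jsz) with growing backoff until the configured backoffLimit is exhausted, at which point the Job is marked failed with BackoffLimitExceeded. When debugging a Job in this state, the retry budget, the observed failure count, and the last attempt's logs are the first things to pull (the pod name below is just the final attempt from this run):

# Compare the configured retry budget with the observed failures,
# then read the last attempt's backup logs.
kubectl -n kuttl-test-fancy-weevil get job xb-demand-backup-fail-minio \
  -o jsonpath='backoffLimit={.spec.backoffLimit} failed={.status.failed}{"\n"}'
kubectl -n kuttl-test-fancy-weevil logs xb-demand-backup-fail-minio-c2jsz -c xtrabackup
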
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:53 +0000 UTC Normal Pod aws-cli-1777995832673950712.spec.containers{aws-cli-1777995832673950712} Created Created container: aws-cli-1777995832673950712 kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:53 +0000 UTC Normal Pod aws-cli-1777995832673950712.spec.containers{aws-cli-1777995832673950712} Started Started container aws-cli-1777995832673950712 kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:57 +0000 UTC Warning PodDisruptionBudget.policy demand-backup-haproxy CalculateExpectedPodCountFailed Failed to calculate the number of expected pods: found no controllers for pod "demand-backup-haproxy-1" controllermanager
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Warning Pod demand-backup-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 15:43:58 MySQL state is not ready... kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:43:58 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:44:00 +0000 UTC Warning Pod demand-backup-mysql-1.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 15:44:00 MySQL state is not ready... kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:44:02 +0000 UTC Warning Pod demand-backup-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 15:44:02 MySQL state is not ready... kubelet
logger.go:42: 15:44:09 | demand-backup | 2026-05-05 15:44:07 +0000 UTC Warning Pod demand-backup-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2026/05/05 15:44:07 MySQL state is not ready... kubelet
logger.go:42: 15:44:09 | demand-backup | Deleting namespace "kuttl-test-fancy-weevil"
=== NAME kuttl
    harness.go:404: run tests finished
    harness.go:511: cleaning up
    harness.go:568: removing temp folder: ""
--- PASS: kuttl (1187.84s)
    --- PASS: kuttl/harness (0.00s)
        --- PASS: kuttl/harness/demand-backup (1187.13s)
PASS
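
[editor's note] The "MySQL state is not ready" readiness-probe warnings and the PodDisruptionBudget CalculateExpectedPodCountFailed event at the end are teardown noise: the controllers are deleted before their pods finish terminating. The suite entry here passed in 1187.13s, nearly the entire 1187.84s run. For a local re-run of just this test against an existing cluster, kuttl can select a single test from the suite directory, along the lines of (standard kubectl-kuttl flags; the repo's own wrapper scripts may add further setup such as the secrets applied in step 0):

# Re-run only the demand-backup test (assumes kubectl-kuttl is installed
# and kubeconfig points at a disposable test cluster).
kubectl kuttl test --test demand-backup --timeout 180 e2e-tests/tests
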