=== RUN kuttl
harness.go:459: starting setup
harness.go:254: running tests using configured kubeconfig.
harness.go:277: Successful connection to cluster at: https://34.58.25.83
harness.go:362: running tests
harness.go:74: going to run test suite with timeout of 180 seconds for each step
harness.go:374: testsuite: e2e-tests/tests has 38 tests
=== RUN kuttl/harness
=== RUN kuttl/harness/gr-demand-backup
=== PAUSE kuttl/harness/gr-demand-backup
=== CONT kuttl/harness/gr-demand-backup
logger.go:42: 19:21:24 | gr-demand-backup | Creating namespace: kuttl-test-alive-sponge
logger.go:42: 19:21:25 | gr-demand-backup/0-minio-secret | starting test step 0-minio-secret
logger.go:42: 19:21:25 | gr-demand-backup/0-minio-secret | Secret:kuttl-test-alive-sponge/minio-secret created
logger.go:42: 19:21:26 | gr-demand-backup/0-minio-secret | test step completed 0-minio-secret
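For reference, the minio-secret that step 0 creates can be reproduced with a minimal sketch like the one below. The key names and values are confirmed later in this log, where deploy_minio reads .data.AWS_ACCESS_KEY_ID and .data.AWS_SECRET_ACCESS_KEY back out of this secret; the Opaque type and stringData layout are assumptions.

    kubectl -n kuttl-test-alive-sponge apply -f - <<'EOF'
    apiVersion: v1
    kind: Secret
    metadata:
      name: minio-secret
    type: Opaque                               # assumed; only the two data keys are confirmed by the log
    stringData:
      AWS_ACCESS_KEY_ID: some-access-key       # decoded later in step 1 as access_key
      AWS_SECRET_ACCESS_KEY: some-secret-key   # decoded later in step 1 as secret_key
    EOF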
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | starting test step 1-deploy-operator
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | running command: [sh -c set -o errexit
        set -o xtrace
        source ../../functions
        init_temp_dir # do this only in the first TestStep
        apply_s3_storage_secrets
        deploy_operator
        deploy_tls_cluster_secrets
        deploy_client
        deploy_minio]
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | + source ../../functions
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ realpath ../../..
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-986
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | ++++ pwd
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/tests/gr-demand-backup
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | ++ test_name=gr-demand-backup
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/vars.sh
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-986
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-986
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/deploy
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/deploy
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/conf
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/conf
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ export GIT_BRANCH=PR-986
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ GIT_BRANCH=PR-986
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ export VERSION=PR-986-fb6e2fa0
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ VERSION=PR-986-fb6e2fa0
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ export MINIO_VER=5.4.0
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ MINIO_VER=5.4.0
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ export VAULT_VER=0.16.1
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ VAULT_VER=0.16.1
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | ++++ which gdate
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-986/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | ++++ which date
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ date=/usr/sbin/date
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ oc get projects
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ :
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ kubectl get nodes
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | +++ grep '^minikube'
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | ++ oc get projects
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | + init_temp_dir
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | + rm -rf /tmp/kuttl/ps/gr-demand-backup
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | + mkdir -p /tmp/kuttl/ps/gr-demand-backup
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | + apply_s3_storage_secrets
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | + apply_minio_secret
logger.go:42: 19:21:26 | gr-demand-backup/1-deploy-operator | + kubectl -n kuttl-test-alive-sponge apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/conf/minio-secret.yml
logger.go:42: 19:21:27 | gr-demand-backup/1-deploy-operator | Warning: resource secrets/minio-secret is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
logger.go:42: 19:21:27 | gr-demand-backup/1-deploy-operator | secret/minio-secret configured
logger.go:42: 19:21:27 | gr-demand-backup/1-deploy-operator | + kubectl -n kuttl-test-alive-sponge apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/conf/cloud-secret.yml
logger.go:42: 19:21:29 | gr-demand-backup/1-deploy-operator | secret/aws-s3-secret created
logger.go:42: 19:21:29 | gr-demand-backup/1-deploy-operator | secret/gcp-cs-secret created
logger.go:42: 19:21:29 | gr-demand-backup/1-deploy-operator | secret/azure-secret created
logger.go:42: 19:21:29 | gr-demand-backup/1-deploy-operator | + deploy_operator
logger.go:42: 19:21:29 | gr-demand-backup/1-deploy-operator | + destroy_operator
logger.go:42: 19:21:29 | gr-demand-backup/1-deploy-operator | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
logger.go:42: 19:21:29 | gr-demand-backup/1-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 19:21:30 | gr-demand-backup/1-deploy-operator | Error from server (NotFound): deployments.apps "percona-server-mysql-operator" not found
logger.go:42: 19:21:30 | gr-demand-backup/1-deploy-operator | + true
logger.go:42: 19:21:30 | gr-demand-backup/1-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 19:21:30 | gr-demand-backup/1-deploy-operator | + kubectl delete namespace ps-operator --force --grace-period=0
logger.go:42: 19:21:30 | gr-demand-backup/1-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 19:21:30 | gr-demand-backup/1-deploy-operator | Error from server (NotFound): namespaces "ps-operator" not found
logger.go:42: 19:21:30 | gr-demand-backup/1-deploy-operator | + true
logger.go:42: 19:21:30 | gr-demand-backup/1-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 19:21:30 | gr-demand-backup/1-deploy-operator | + create_namespace ps-operator
logger.go:42: 19:21:30 | gr-demand-backup/1-deploy-operator | + local namespace=ps-operator
logger.go:42: 19:21:30 | gr-demand-backup/1-deploy-operator | + [[ -n '' ]]
logger.go:42: 19:21:30 | gr-demand-backup/1-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found
logger.go:42: 19:21:31 | gr-demand-backup/1-deploy-operator | + kubectl wait --for=delete namespace ps-operator
logger.go:42: 19:21:31 | gr-demand-backup/1-deploy-operator | + kubectl create namespace ps-operator
logger.go:42: 19:21:32 | gr-demand-backup/1-deploy-operator | namespace/ps-operator created
logger.go:42: 19:21:32 | gr-demand-backup/1-deploy-operator | + kubectl -n ps-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-986/deploy/crd.yaml
logger.go:42: 19:21:33 | gr-demand-backup/1-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
logger.go:42: 19:21:33 | gr-demand-backup/1-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
logger.go:42: 19:21:34 | gr-demand-backup/1-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
logger.go:42: 19:21:34 | gr-demand-backup/1-deploy-operator | + '[' -n ps-operator ']'
logger.go:42: 19:21:34 | gr-demand-backup/1-deploy-operator | + kubectl -n ps-operator apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-986/deploy/cw-rbac.yaml
logger.go:42: 19:21:36 | gr-demand-backup/1-deploy-operator | serviceaccount/percona-server-mysql-operator created
logger.go:42: 19:21:36 | gr-demand-backup/1-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 19:21:36 | gr-demand-backup/1-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 19:21:37 | gr-demand-backup/1-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 19:21:37 | gr-demand-backup/1-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 19:21:37 | gr-demand-backup/1-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"'
logger.go:42: 19:21:37 | gr-demand-backup/1-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"'
logger.go:42: 19:21:37 | gr-demand-backup/1-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0
logger.go:42: 19:21:37 | gr-demand-backup/1-deploy-operator | + kubectl -n ps-operator apply -f -
logger.go:42: 19:21:37 | gr-demand-backup/1-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0"' /mnt/jenkins/workspace/cloud-ps-operator_PR-986/deploy/cw-operator.yaml
logger.go:42: 19:21:38 | gr-demand-backup/1-deploy-operator | configmap/percona-server-mysql-operator-config created
logger.go:42: 19:21:39 | gr-demand-backup/1-deploy-operator | deployment.apps/percona-server-mysql-operator created
logger.go:42: 19:21:39 | gr-demand-backup/1-deploy-operator | + deploy_tls_cluster_secrets
logger.go:42: 19:21:39 | gr-demand-backup/1-deploy-operator | + kubectl -n kuttl-test-alive-sponge apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/conf/ssl-secret.yaml
logger.go:42: 19:21:40 | gr-demand-backup/1-deploy-operator | secret/test-ssl created
logger.go:42: 19:21:40 | gr-demand-backup/1-deploy-operator | + deploy_client
logger.go:42: 19:21:40 | gr-demand-backup/1-deploy-operator | + kubectl -n kuttl-test-alive-sponge apply -f -
logger.go:42: 19:21:40 | gr-demand-backup/1-deploy-operator | ++ printf '.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 19:21:40 | gr-demand-backup/1-deploy-operator | + yq eval '.spec.containers[0].image="perconalab/percona-server-mysql-operator:main-psmysql8.0"' /mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/conf/client.yaml
logger.go:42: 19:21:41 | gr-demand-backup/1-deploy-operator | pod/mysql-client created
logger.go:42: 19:21:41 | gr-demand-backup/1-deploy-operator | + deploy_minio
logger.go:42: 19:21:41 | gr-demand-backup/1-deploy-operator | + local access_key
logger.go:42: 19:21:41 | gr-demand-backup/1-deploy-operator | + local secret_key
logger.go:42: 19:21:41 | gr-demand-backup/1-deploy-operator | ++ kubectl -n kuttl-test-alive-sponge get secret minio-secret -o 'jsonpath={.data.AWS_ACCESS_KEY_ID}'
logger.go:42: 19:21:41 | gr-demand-backup/1-deploy-operator | ++ base64 -d
logger.go:42: 19:21:42 | gr-demand-backup/1-deploy-operator | + access_key=some-access-key
logger.go:42: 19:21:42 | gr-demand-backup/1-deploy-operator | ++ kubectl -n kuttl-test-alive-sponge get secret minio-secret -o 'jsonpath={.data.AWS_SECRET_ACCESS_KEY}'
logger.go:42: 19:21:42 | gr-demand-backup/1-deploy-operator | ++ base64 -d
logger.go:42: 19:21:42 | gr-demand-backup/1-deploy-operator | + secret_key=some-secret-key
logger.go:42: 19:21:42 | gr-demand-backup/1-deploy-operator | + helm uninstall -n kuttl-test-alive-sponge minio-service
logger.go:42: 19:21:43 | gr-demand-backup/1-deploy-operator | Error: uninstall: Release not loaded: minio-service: release: not found
logger.go:42: 19:21:43 | gr-demand-backup/1-deploy-operator | + :
logger.go:42: 19:21:43 | gr-demand-backup/1-deploy-operator | + helm repo remove minio
logger.go:42: 19:21:43 | gr-demand-backup/1-deploy-operator | Error: no repositories configured
logger.go:42: 19:21:43 | gr-demand-backup/1-deploy-operator | + :
logger.go:42: 19:21:43 | gr-demand-backup/1-deploy-operator | + helm repo add minio https://charts.min.io/
logger.go:42: 19:21:43 | gr-demand-backup/1-deploy-operator | "minio" has been added to your repositories
logger.go:42: 19:21:43 | gr-demand-backup/1-deploy-operator | +++ printf %q some-access-key
logger.go:42: 19:21:43 | gr-demand-backup/1-deploy-operator | ++ printf %q some-access-key
logger.go:42: 19:21:43 | gr-demand-backup/1-deploy-operator | +++ printf %q some-secret-key
logger.go:42: 19:21:43 | gr-demand-backup/1-deploy-operator | ++ printf %q some-secret-key
logger.go:42: 19:21:43 | gr-demand-backup/1-deploy-operator | + retry 10 60 helm install minio-service -n kuttl-test-alive-sponge --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio
logger.go:42: 19:21:43 | gr-demand-backup/1-deploy-operator | + local max=10
logger.go:42: 19:21:43 | gr-demand-backup/1-deploy-operator | + local delay=60
logger.go:42: 19:21:43 | gr-demand-backup/1-deploy-operator | + shift 2
logger.go:42: 19:21:43 | gr-demand-backup/1-deploy-operator | + local n=1
logger.go:42: 19:21:43 | gr-demand-backup/1-deploy-operator | + helm install minio-service -n kuttl-test-alive-sponge --version 5.4.0 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | NAME: minio-service
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | LAST DEPLOYED: Wed Aug 13 19:21:44 2025
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | NAMESPACE: kuttl-test-alive-sponge
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | STATUS: deployed
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | REVISION: 1
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | TEST SUITE: None
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | NOTES:
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | MinIO can be accessed via port 9000 on the following DNS name from within your cluster:
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | minio-service.kuttl-test-alive-sponge.cluster.local
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator |
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | To access MinIO from localhost, run the below commands:
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator |
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | 1. export POD_NAME=$(kubectl get pods --namespace kuttl-test-alive-sponge -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}")
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator |
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | 2. kubectl port-forward $POD_NAME 9000 --namespace kuttl-test-alive-sponge
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator |
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator |
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client:
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator |
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator |
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace kuttl-test-alive-sponge minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace kuttl-test-alive-sponge minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator |
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | 3. mc ls minio-service-local
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | ++ kubectl -n kuttl-test-alive-sponge get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | + MINIO_POD=minio-service-86dfccd949-ghwws
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | + wait_pod minio-service-86dfccd949-ghwws
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | + local pod=minio-service-86dfccd949-ghwws
logger.go:42: 19:22:15 | gr-demand-backup/1-deploy-operator | + set +o xtrace
logger.go:42: 19:22:16 | gr-demand-backup/1-deploy-operator | minio-service-86dfccd949-ghwwstrue
logger.go:42: 19:22:16 | gr-demand-backup/1-deploy-operator | + kubectl -n kuttl-test-alive-sponge run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID='\''some-access-key'\'' AWS_SECRET_ACCESS_KEY='\''some-secret-key'\'' AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'
logger.go:42: 19:22:20 | gr-demand-backup/1-deploy-operator | If you don't see a command prompt, try pressing enter.
logger.go:42: 19:22:20 | gr-demand-backup/1-deploy-operator | warning: couldn't attach to pod/aws-cli, falling back to streaming logs: Internal error occurred: unable to upgrade connection: container aws-cli not found in pod aws-cli_kuttl-test-alive-sponge
logger.go:42: 19:22:20 | gr-demand-backup/1-deploy-operator | make_bucket: operator-testing
logger.go:42: 19:22:22 | gr-demand-backup/1-deploy-operator | pod "aws-cli" deleted
logger.go:42: 19:22:23 | gr-demand-backup/1-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 19:22:23 | gr-demand-backup/1-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 19:22:24 | gr-demand-backup/1-deploy-operator | INFO Found 1 resource(s).
logger.go:42: 19:22:24 | gr-demand-backup/1-deploy-operator | NAME                            NAMESPACE     COL0
logger.go:42: 19:22:24 | gr-demand-backup/1-deploy-operator | percona-server-mysql-operator   ps-operator   1
logger.go:42: 19:22:24 | gr-demand-backup/1-deploy-operator | ASSERT PASS
logger.go:42: 19:22:24 | gr-demand-backup/1-deploy-operator | test step completed 1-deploy-operator
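With step 1 done, the operator-testing bucket can be checked the same way the test created it; this sketch reuses the log's own aws-cli invocation with s3 ls in place of s3 mb (the pod name aws-cli-check is illustrative):

    kubectl -n kuttl-test-alive-sponge run -i --rm aws-cli-check --image=perconalab/awscli --restart=Never -- \
        bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key \
            AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing'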
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | starting test step 2-create-cluster
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | running command: [sh -c set -o errexit
        set -o xtrace
        source ../../functions
        get_cr \
          | yq eval ".spec.backup.backoffLimit=3" - \
          | yq eval '.spec.backup.storages.minio.type="s3"' - \
          | yq eval '.spec.backup.storages.minio.s3.bucket="operator-testing"' - \
          | yq eval '.spec.backup.storages.minio.s3.credentialsSecret="minio-secret"' - \
          | yq eval ".spec.backup.storages.minio.s3.endpointUrl=\"http://minio-service.${NAMESPACE}:9000\"" - \
          | yq eval '.spec.backup.storages.minio.s3.region="us-east-1"' - \
          | yq eval '.spec.backup.storages.aws-s3.type="s3"' - \
          | yq eval ".spec.backup.storages.aws-s3.verifyTLS=true" - \
          | yq eval '.spec.backup.storages.aws-s3.s3.bucket="operator-testing"' - \
          | yq eval '.spec.backup.storages.aws-s3.s3.credentialsSecret="aws-s3-secret"' - \
          | yq eval '.spec.backup.storages.aws-s3.s3.region="us-east-1"' - \
          | yq eval '.spec.backup.storages.aws-s3.s3.prefix="ps"' - \
          | yq eval '.spec.backup.storages.gcp-cs.type="gcs"' - \
          | yq eval ".spec.backup.storages.gcp-cs.verifyTLS=true" - \
          | yq eval '.spec.backup.storages.gcp-cs.gcs.bucket="operator-testing"' - \
          | yq eval '.spec.backup.storages.gcp-cs.gcs.credentialsSecret="gcp-cs-secret"' - \
          | yq eval '.spec.backup.storages.gcp-cs.gcs.endpointUrl="https://storage.googleapis.com"' - \
          | yq eval '.spec.backup.storages.gcp-cs.gcs.prefix="ps"' - \
          | yq eval '.spec.backup.storages.azure-blob.type="azure"' - \
          | yq eval ".spec.backup.storages.azure-blob.verifyTLS=true" - \
          | yq eval '.spec.backup.storages.azure-blob.azure.container="operator-testing"' - \
          | yq eval '.spec.backup.storages.azure-blob.azure.credentialsSecret="azure-secret"' - \
          | yq eval '.spec.backup.storages.azure-blob.azure.prefix="ps"' - \
          | yq eval '.spec.mysql.clusterType="group-replication"' - \
          | yq eval ".spec.proxy.router.enabled=true" - \
          | yq eval ".spec.proxy.haproxy.enabled=false" - \
          | kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | + source ../../functions
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ realpath ../../..
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-986
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | ++++ pwd
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/tests/gr-demand-backup
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | ++ test_name=gr-demand-backup
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/vars.sh
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-986
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-986
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/deploy
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/deploy
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/conf
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/conf
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ export GIT_BRANCH=PR-986
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ GIT_BRANCH=PR-986
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ export VERSION=PR-986-fb6e2fa0
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ VERSION=PR-986-fb6e2fa0
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ export MINIO_VER=5.4.0
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ MINIO_VER=5.4.0
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ export VAULT_VER=0.16.1
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ VAULT_VER=0.16.1
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | ++++ which gdate
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-986/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | ++++ which date
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ date=/usr/sbin/date
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ oc get projects
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ :
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ kubectl get nodes
logger.go:42: 19:22:24 | gr-demand-backup/2-create-cluster | +++ grep '^minikube'
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | ++ oc get projects
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + get_cr
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + local name_suffix=
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval .spec.backup.backoffLimit=3 -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.minio.type="s3"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.minio.s3.bucket="operator-testing"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.minio.s3.credentialsSecret="minio-secret"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.minio.s3.endpointUrl="http://minio-service.kuttl-test-alive-sponge:9000"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | ++ printf '.metadata.name="%s"' gr-demand-backup
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.secretsName="test-secrets"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.metadata.name="gr-demand-backup"' /mnt/jenkins/workspace/cloud-ps-operator_PR-986/deploy/cr.yaml
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval .spec.backup.storages.aws-s3.verifyTLS=true -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.minio.s3.region="us-east-1"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.aws-s3.s3.region="us-east-1"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.aws-s3.s3.credentialsSecret="aws-s3-secret"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.aws-s3.type="s3"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.gcp-cs.gcs.credentialsSecret="gcp-cs-secret"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.gcp-cs.type="gcs"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.aws-s3.s3.prefix="ps"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.gcp-cs.gcs.bucket="operator-testing"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.aws-s3.s3.bucket="operator-testing"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.gcp-cs.gcs.endpointUrl="https://storage.googleapis.com"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval .spec.backup.storages.gcp-cs.verifyTLS=true -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval .spec.mysql.gracePeriod=30 -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval .spec.orchestrator.enabled=true -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.gcp-cs.gcs.prefix="ps"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router8.0"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.azure-blob.type="azure"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + '[' -n '' ']'
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:3-dev-latest
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval .spec.backup.storages.azure-blob.verifyTLS=true -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:3-dev-latest"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.azure-blob.azure.container="operator-testing"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.mysql.clusterType="group-replication"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval .spec.proxy.haproxy.enabled=false -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + kubectl -n kuttl-test-alive-sponge apply -f -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.azure-blob.azure.credentialsSecret="azure-secret"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.azure-blob.azure.prefix="ps"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval .spec.proxy.router.enabled=true -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup8.0"' -
logger.go:42: 19:22:25 | gr-demand-backup/2-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql8.0"' -
logger.go:42: 19:22:26 | gr-demand-backup/2-create-cluster | perconaservermysql.ps.percona.com/gr-demand-backup created
logger.go:42: 19:27:28 | gr-demand-backup/2-create-cluster | test step completed 2-create-cluster
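The interleaved yq trace above is just the step script's pipeline echoed by xtrace. Condensed into a single expression, the cluster it applies boils down to the sketch below; it covers only the topology switches and the minio storage (values taken from the trace), and the aws-s3, gcp-cs, and azure-blob storages follow the same pattern:

    yq eval '
      .metadata.name = "gr-demand-backup" |
      .spec.mysql.clusterType = "group-replication" |
      .spec.proxy.router.enabled = true |
      .spec.proxy.haproxy.enabled = false |
      .spec.backup.backoffLimit = 3 |
      .spec.backup.storages.minio.type = "s3" |
      .spec.backup.storages.minio.s3.bucket = "operator-testing" |
      .spec.backup.storages.minio.s3.credentialsSecret = "minio-secret" |
      .spec.backup.storages.minio.s3.endpointUrl = "http://minio-service.kuttl-test-alive-sponge:9000" |
      .spec.backup.storages.minio.s3.region = "us-east-1"
    ' /mnt/jenkins/workspace/cloud-ps-operator_PR-986/deploy/cr.yaml | kubectl -n kuttl-test-alive-sponge apply -f -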
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | starting test step 3-write-data
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | running command: [sh -c set -o errexit
        set -o pipefail
        set -o xtrace
        source ../../functions
        password=$(get_user_pass root)
        run_mysql \
          "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" \
          "-h $(get_mysql_router_service $(get_cluster_name)) -uroot -p'$password'"
        run_mysql \
          "INSERT myDB.myTable (id) VALUES (100500)" \
          "-h $(get_mysql_router_service $(get_cluster_name)) -uroot -p'$password'"]
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | + source ../../functions
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ realpath ../../..
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-986
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | ++++ pwd
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/tests/gr-demand-backup
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | ++ test_name=gr-demand-backup
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/vars.sh
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-986
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-986
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/deploy
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/deploy
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/conf
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/conf
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ export GIT_BRANCH=PR-986
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ GIT_BRANCH=PR-986
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ export VERSION=PR-986-fb6e2fa0
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ VERSION=PR-986-fb6e2fa0
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ export MINIO_VER=5.4.0
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ MINIO_VER=5.4.0
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ export VAULT_VER=0.16.1
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ VAULT_VER=0.16.1
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | ++++ which gdate
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-986/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | ++++ which date
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ date=/usr/sbin/date
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ oc get projects
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ :
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ kubectl get nodes
logger.go:42: 19:27:28 | gr-demand-backup/3-write-data | +++ grep '^minikube'
logger.go:42: 19:27:29 | gr-demand-backup/3-write-data | ++ oc get projects
logger.go:42: 19:27:29 | gr-demand-backup/3-write-data | ++ get_user_pass root
logger.go:42: 19:27:29 | gr-demand-backup/3-write-data | ++ local user=root
logger.go:42: 19:27:29 | gr-demand-backup/3-write-data | ++ kubectl -n kuttl-test-alive-sponge get secret test-secrets -o 'jsonpath={.data.root}'
logger.go:42: 19:27:29 | gr-demand-backup/3-write-data | ++ base64 --decode
logger.go:42: 19:27:29 | gr-demand-backup/3-write-data | + password='pdqC>*{Fiq2R)mzJ,d'
logger.go:42: 19:27:29 | gr-demand-backup/3-write-data | +++ get_cluster_name
logger.go:42: 19:27:29 | gr-demand-backup/3-write-data | +++ kubectl -n kuttl-test-alive-sponge get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 19:27:30 | gr-demand-backup/3-write-data | ++ get_mysql_router_service gr-demand-backup
logger.go:42: 19:27:30 | gr-demand-backup/3-write-data | ++ local cluster=gr-demand-backup
logger.go:42: 19:27:30 | gr-demand-backup/3-write-data | ++ echo gr-demand-backup-router
logger.go:42: 19:27:30 | gr-demand-backup/3-write-data | + run_mysql 'CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' '-h gr-demand-backup-router -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:27:30 | gr-demand-backup/3-write-data | + local 'command=CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)'
logger.go:42: 19:27:30 | gr-demand-backup/3-write-data | + local 'uri=-h gr-demand-backup-router -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:27:30 | gr-demand-backup/3-write-data | + local pod=
logger.go:42: 19:27:30 | gr-demand-backup/3-write-data | ++ get_client_pod
logger.go:42: 19:27:30 | gr-demand-backup/3-write-data | ++ kubectl -n kuttl-test-alive-sponge get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:27:30 | gr-demand-backup/3-write-data | + client_pod=mysql-client
logger.go:42: 19:27:30 | gr-demand-backup/3-write-data | + wait_pod mysql-client
logger.go:42: 19:27:30 | gr-demand-backup/3-write-data | + local pod=mysql-client
logger.go:42: 19:27:30 | gr-demand-backup/3-write-data | + set +o xtrace
logger.go:42: 19:27:31 | gr-demand-backup/3-write-data | mysql-clienttrue
logger.go:42: 19:27:31 | gr-demand-backup/3-write-data | + kubectl -n kuttl-test-alive-sponge exec mysql-client -- bash -c 'printf '\''%s\n'\'' "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" | mysql -sN -h gr-demand-backup-router -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:27:31 | gr-demand-backup/3-write-data | + sed -e 's/mysql: //'
logger.go:42: 19:27:31 | gr-demand-backup/3-write-data | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:27:32 | gr-demand-backup/3-write-data | + :
logger.go:42: 19:27:32 | gr-demand-backup/3-write-data | +++ get_cluster_name
logger.go:42: 19:27:32 | gr-demand-backup/3-write-data | +++ kubectl -n kuttl-test-alive-sponge get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 19:27:32 | gr-demand-backup/3-write-data | ++ get_mysql_router_service gr-demand-backup
logger.go:42: 19:27:32 | gr-demand-backup/3-write-data | ++ local cluster=gr-demand-backup
logger.go:42: 19:27:32 | gr-demand-backup/3-write-data | ++ echo gr-demand-backup-router
logger.go:42: 19:27:32 | gr-demand-backup/3-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100500)' '-h gr-demand-backup-router -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:27:32 | gr-demand-backup/3-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100500)'
logger.go:42: 19:27:32 | gr-demand-backup/3-write-data | + local 'uri=-h gr-demand-backup-router -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:27:32 | gr-demand-backup/3-write-data | + local pod=
logger.go:42: 19:27:32 | gr-demand-backup/3-write-data | ++ get_client_pod
logger.go:42: 19:27:32 | gr-demand-backup/3-write-data | ++ kubectl -n kuttl-test-alive-sponge get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:27:33 | gr-demand-backup/3-write-data | + client_pod=mysql-client
logger.go:42: 19:27:33 | gr-demand-backup/3-write-data | + wait_pod mysql-client
logger.go:42: 19:27:33 | gr-demand-backup/3-write-data | + local pod=mysql-client
logger.go:42: 19:27:33 | gr-demand-backup/3-write-data | + set +o xtrace
logger.go:42: 19:27:33 | gr-demand-backup/3-write-data | mysql-clienttrue
logger.go:42: 19:27:33 | gr-demand-backup/3-write-data | + kubectl -n kuttl-test-alive-sponge exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100500)" | mysql -sN -h gr-demand-backup-router -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:27:33 | gr-demand-backup/3-write-data | + sed -e 's/mysql: //'
logger.go:42: 19:27:33 | gr-demand-backup/3-write-data | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:27:34 | gr-demand-backup/3-write-data | + :
logger.go:42: 19:27:34 | gr-demand-backup/3-write-data | test step completed 3-write-data
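Once 3-write-data completes, the inserted row can be read back through the router with the same run_mysql plumbing the step used; a minimal sketch (the SELECT mirrors the query step 5 runs later, and $password is the root password fetched from test-secrets above):

    kubectl -n kuttl-test-alive-sponge exec mysql-client -- \
        bash -c "printf '%s\n' 'SELECT * FROM myDB.myTable' | mysql -sN -h gr-demand-backup-router -uroot -p'$password'"
    # expected output: 100500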
logger.go:42: 19:27:34 | gr-demand-backup/4-create-backup-minio | starting test step 4-create-backup-minio
logger.go:42: 19:27:35 | gr-demand-backup/4-create-backup-minio | PerconaServerMySQLBackup:kuttl-test-alive-sponge/gr-demand-backup-minio created
logger.go:42: 19:27:47 | gr-demand-backup/4-create-backup-minio | test step completed 4-create-backup-minio
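The kuttl step here applies a PerconaServerMySQLBackup object pointing at the minio storage defined in step 2. A plausible minimal sketch is below; only the kind, name, and namespace are confirmed by the log, while the apiVersion and the spec field names (clusterName, storageName) are assumptions based on the CRD group applied in step 1:

    kubectl -n kuttl-test-alive-sponge apply -f - <<'EOF'
    apiVersion: ps.percona.com/v1alpha1   # assumed version; the group ps.percona.com matches the CRDs from step 1
    kind: PerconaServerMySQLBackup
    metadata:
      name: gr-demand-backup-minio
    spec:
      clusterName: gr-demand-backup       # assumed field name
      storageName: minio                  # storage defined on the cluster in step 2
    EOF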
logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-986 logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | ++++ pwd logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/tests/gr-demand-backup logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | ++ test_name=gr-demand-backup logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/vars.sh logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-986 logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-986 logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/deploy logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/deploy logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/conf logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/conf logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ export GIT_BRANCH=PR-986 logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ GIT_BRANCH=PR-986 logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ export VERSION=PR-986-fb6e2fa0 logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ VERSION=PR-986-fb6e2fa0 logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0 logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0 logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0 logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0 logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0 logger.go:42: 19:27:47 | 
logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | ++ get_user_pass root
logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | ++ local user=root
logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | ++ kubectl -n kuttl-test-alive-sponge get secret test-secrets -o 'jsonpath={.data.root}'
logger.go:42: 19:27:47 | gr-demand-backup/5-delete-data | ++ base64 --decode
logger.go:42: 19:27:48 | gr-demand-backup/5-delete-data | + password='pdqC>*{Fiq2R)mzJ,d'
logger.go:42: 19:27:48 | gr-demand-backup/5-delete-data | +++ get_cluster_name
logger.go:42: 19:27:48 | gr-demand-backup/5-delete-data | +++ kubectl -n kuttl-test-alive-sponge get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 19:27:48 | gr-demand-backup/5-delete-data | ++ get_mysql_router_service gr-demand-backup
logger.go:42: 19:27:48 | gr-demand-backup/5-delete-data | ++ local cluster=gr-demand-backup
logger.go:42: 19:27:48 | gr-demand-backup/5-delete-data | ++ echo gr-demand-backup-router
logger.go:42: 19:27:48 | gr-demand-backup/5-delete-data | + run_mysql 'TRUNCATE TABLE myDB.myTable' '-h gr-demand-backup-router -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:27:48 | gr-demand-backup/5-delete-data | + local 'command=TRUNCATE TABLE myDB.myTable'
logger.go:42: 19:27:48 | gr-demand-backup/5-delete-data | + local 'uri=-h gr-demand-backup-router -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:27:48 | gr-demand-backup/5-delete-data | + local pod=
logger.go:42: 19:27:48 | gr-demand-backup/5-delete-data | ++ get_client_pod
logger.go:42: 19:27:48 | gr-demand-backup/5-delete-data | ++ kubectl -n kuttl-test-alive-sponge get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:27:49 | gr-demand-backup/5-delete-data | + client_pod=mysql-client
logger.go:42: 19:27:49 | gr-demand-backup/5-delete-data | + wait_pod mysql-client
logger.go:42: 19:27:49 | gr-demand-backup/5-delete-data | + local pod=mysql-client
logger.go:42: 19:27:49 | gr-demand-backup/5-delete-data | + set +o xtrace
logger.go:42: 19:27:49 | gr-demand-backup/5-delete-data | mysql-clienttrue
logger.go:42: 19:27:49 | gr-demand-backup/5-delete-data | + sed -e 's/mysql: //'
logger.go:42: 19:27:49 | gr-demand-backup/5-delete-data | + kubectl -n kuttl-test-alive-sponge exec mysql-client -- bash -c 'printf '\''%s\n'\'' "TRUNCATE TABLE myDB.myTable" | mysql -sN -h gr-demand-backup-router -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:27:49 | gr-demand-backup/5-delete-data | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:27:50 | gr-demand-backup/5-delete-data | + :
logger.go:42: 19:27:50 | gr-demand-backup/5-delete-data | ++ get_cluster_name
logger.go:42: 19:27:50 | gr-demand-backup/5-delete-data | ++ kubectl -n kuttl-test-alive-sponge get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 19:27:51 | gr-demand-backup/5-delete-data | + cluster_name=gr-demand-backup
logger.go:42: 19:27:51 | gr-demand-backup/5-delete-data | + for i in 0 1 2
logger.go:42: 19:27:51 | gr-demand-backup/5-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-0.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:27:51 | gr-demand-backup/5-delete-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 19:27:51 | gr-demand-backup/5-delete-data | ++ local 'uri=-h gr-demand-backup-mysql-0.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:27:51 | gr-demand-backup/5-delete-data | ++ local pod=
logger.go:42: 19:27:51 | gr-demand-backup/5-delete-data | +++ get_client_pod
logger.go:42: 19:27:51 | gr-demand-backup/5-delete-data | +++ kubectl -n kuttl-test-alive-sponge get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:27:51 | gr-demand-backup/5-delete-data | ++ client_pod=mysql-client
logger.go:42: 19:27:51 | gr-demand-backup/5-delete-data | ++ wait_pod mysql-client
logger.go:42: 19:27:51 | gr-demand-backup/5-delete-data | ++ local pod=mysql-client
logger.go:42: 19:27:51 | gr-demand-backup/5-delete-data | ++ set +o xtrace
logger.go:42: 19:27:52 | gr-demand-backup/5-delete-data | mysql-clienttrue
logger.go:42: 19:27:52 | gr-demand-backup/5-delete-data | ++ kubectl -n kuttl-test-alive-sponge exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-demand-backup-mysql-0.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:27:52 | gr-demand-backup/5-delete-data | ++ sed -e 's/mysql: //'
logger.go:42: 19:27:52 | gr-demand-backup/5-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:27:53 | gr-demand-backup/5-delete-data | ++ :
logger.go:42: 19:27:53 | gr-demand-backup/5-delete-data | + data=
logger.go:42: 19:27:53 | gr-demand-backup/5-delete-data | + kubectl create configmap -n kuttl-test-alive-sponge 04-delete-data-minio-0 --from-literal=data=
logger.go:42: 19:27:54 | gr-demand-backup/5-delete-data | configmap/04-delete-data-minio-0 created
logger.go:42: 19:27:54 | gr-demand-backup/5-delete-data | + for i in 0 1 2
logger.go:42: 19:27:54 | gr-demand-backup/5-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-1.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:27:56 | gr-demand-backup/5-delete-data | ++ :
logger.go:42: 19:27:56 | gr-demand-backup/5-delete-data | + data=
logger.go:42: 19:27:56 | gr-demand-backup/5-delete-data | + kubectl create configmap -n kuttl-test-alive-sponge 04-delete-data-minio-1 --from-literal=data=
logger.go:42: 19:27:56 | gr-demand-backup/5-delete-data | configmap/04-delete-data-minio-1 created
logger.go:42: 19:27:56 | gr-demand-backup/5-delete-data | + for i in 0 1 2
logger.go:42: 19:27:56 | gr-demand-backup/5-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-2.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:27:58 | gr-demand-backup/5-delete-data | ++ :
logger.go:42: 19:27:58 | gr-demand-backup/5-delete-data | + data=
logger.go:42: 19:27:58 | gr-demand-backup/5-delete-data | + kubectl create configmap -n kuttl-test-alive-sponge 04-delete-data-minio-2 --from-literal=data=
logger.go:42: 19:27:59 | gr-demand-backup/5-delete-data | configmap/04-delete-data-minio-2 created
logger.go:42: 19:28:00 | gr-demand-backup/5-delete-data | test step completed 5-delete-data
logger.go:42: 19:28:00 | gr-demand-backup/6-restore-from-minio | starting test step 6-restore-from-minio
logger.go:42: 19:28:01 | gr-demand-backup/6-restore-from-minio | PerconaServerMySQLRestore:kuttl-test-alive-sponge/gr-demand-backup-restore-minio created
logger.go:42: 19:33:10 | gr-demand-backup/6-restore-from-minio | test step completed 6-restore-from-minio
logger.go:42: 19:33:10 | gr-demand-backup/7-read-data | starting test step 7-read-data
logger.go:42: 19:33:10 | gr-demand-backup/7-read-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions password=$(get_user_pass root) cluster_name=$(get_cluster_name) for i in 0 1 2; do data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql -uroot -p'$password'") kubectl create configmap -n "${NAMESPACE}" 06-read-data-minio-${i} --from-literal=data="${data}" done]
logger.go:42: 19:33:10 | gr-demand-backup/7-read-data | + source ../../functions
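The five-minute gap between the restore object's creation (19:28:01) and the step completing (19:33:10) is the harness waiting for the restore to finish. Outside kuttl, an equivalent wait could look like the following sketch; the ps-restore short name and a .status.state that reaches Succeeded are assumptions taken from the operator's documentation, not confirmed by this log:

# Sketch only: block until the PerconaServerMySQLRestore reports success.
kubectl -n kuttl-test-alive-sponge wait ps-restore/gr-demand-backup-restore-minio \
    --for=jsonpath='{.status.state}'=Succeeded --timeout=10m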
logger.go:42: 19:33:12 | gr-demand-backup/7-read-data | + cluster_name=gr-demand-backup
logger.go:42: 19:33:12 | gr-demand-backup/7-read-data | + for i in 0 1 2
logger.go:42: 19:33:12 | gr-demand-backup/7-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-0.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:33:14 | gr-demand-backup/7-read-data | + data=100500
logger.go:42: 19:33:14 | gr-demand-backup/7-read-data | + kubectl create configmap -n kuttl-test-alive-sponge 06-read-data-minio-0 --from-literal=data=100500
logger.go:42: 19:33:14 | gr-demand-backup/7-read-data | configmap/06-read-data-minio-0 created
logger.go:42: 19:33:14 | gr-demand-backup/7-read-data | + for i in 0 1 2
logger.go:42: 19:33:14 | gr-demand-backup/7-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-1.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:33:16 | gr-demand-backup/7-read-data | + data=100500
logger.go:42: 19:33:16 | gr-demand-backup/7-read-data | + kubectl create configmap -n kuttl-test-alive-sponge 06-read-data-minio-1 --from-literal=data=100500
logger.go:42: 19:33:17 | gr-demand-backup/7-read-data | configmap/06-read-data-minio-1 created
logger.go:42: 19:33:17 | gr-demand-backup/7-read-data | + for i in 0 1 2
logger.go:42: 19:33:17 | gr-demand-backup/7-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-2.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:33:18 | gr-demand-backup/7-read-data | + data=100500
logger.go:42: 19:33:18 | gr-demand-backup/7-read-data | + kubectl create configmap -n kuttl-test-alive-sponge 06-read-data-minio-2 --from-literal=data=100500
logger.go:42: 19:33:19 | gr-demand-backup/7-read-data | configmap/06-read-data-minio-2 created
logger.go:42: 19:33:20 | gr-demand-backup/7-read-data | test step completed 7-read-data
logger.go:42: 19:33:20 | gr-demand-backup/8-create-backup-s3 | starting test step 8-create-backup-s3
logger.go:42: 19:33:21 | gr-demand-backup/8-create-backup-s3 | PerconaServerMySQLBackup:kuttl-test-alive-sponge/gr-demand-backup-s3 created
logger.go:42: 19:33:33 | gr-demand-backup/8-create-backup-s3 | test step completed 8-create-backup-s3
logger.go:42: 19:33:33 | gr-demand-backup/9-delete-data | starting test step 9-delete-data
logger.go:42: 19:33:33 | gr-demand-backup/9-delete-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions password=$(get_user_pass root) run_mysql \ "TRUNCATE TABLE myDB.myTable" \ "-h $(get_mysql_router_service $(get_cluster_name)) -uroot -p'$password'" cluster_name=$(get_cluster_name) for i in 0 1 2; do data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql -uroot -p'$password'") kubectl create configmap -n "${NAMESPACE}" 08-delete-data-s3-${i} --from-literal=data="${data}" done]
logger.go:42: 19:33:33 | gr-demand-backup/9-delete-data | + source ../../functions
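Every delete-data and read-data step runs the same loop from the step script above: query each mysql pod directly through its headless-service DNS name, bypassing the router, and persist the result as a ConfigMap for the step's assert to compare. As a standalone sketch, with run_mysql as sketched earlier and password/NAMESPACE assumed set:

# Sketch of the per-pod verification loop from the step scripts.
cluster_name=$(kubectl -n "$NAMESPACE" get ps -o 'jsonpath={.items[0].metadata.name}')
for i in 0 1 2; do
    # Ask each group-replication member for the table contents directly.
    data=$(run_mysql "SELECT * FROM myDB.myTable" \
        "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql -uroot -p'$password'")
    # Empty data here means the TRUNCATE really reached pod $i.
    kubectl create configmap -n "$NAMESPACE" "08-delete-data-s3-${i}" \
        --from-literal=data="${data}"
done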
logger.go:42: 19:33:34 | gr-demand-backup/9-delete-data | + run_mysql 'TRUNCATE TABLE myDB.myTable' '-h gr-demand-backup-router -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:33:36 | gr-demand-backup/9-delete-data | + :
logger.go:42: 19:33:37 | gr-demand-backup/9-delete-data | + cluster_name=gr-demand-backup
logger.go:42: 19:33:37 | gr-demand-backup/9-delete-data | + for i in 0 1 2
logger.go:42: 19:33:37 | gr-demand-backup/9-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-0.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:33:39 | gr-demand-backup/9-delete-data | ++ :
logger.go:42: 19:33:39 | gr-demand-backup/9-delete-data | + data=
logger.go:42: 19:33:39 | gr-demand-backup/9-delete-data | + kubectl create configmap -n kuttl-test-alive-sponge 08-delete-data-s3-0 --from-literal=data=
logger.go:42: 19:33:39 | gr-demand-backup/9-delete-data | configmap/08-delete-data-s3-0 created
logger.go:42: 19:33:39 | gr-demand-backup/9-delete-data | + for i in 0 1 2
logger.go:42: 19:33:39 | gr-demand-backup/9-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-1.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:33:41 | gr-demand-backup/9-delete-data | ++ :
logger.go:42: 19:33:41 | gr-demand-backup/9-delete-data | + data=
logger.go:42: 19:33:41 | gr-demand-backup/9-delete-data | + kubectl create configmap -n kuttl-test-alive-sponge 08-delete-data-s3-1 --from-literal=data=
logger.go:42: 19:33:42 | gr-demand-backup/9-delete-data | configmap/08-delete-data-s3-1 created
logger.go:42: 19:33:42 | gr-demand-backup/9-delete-data | + for i in 0 1 2
logger.go:42: 19:33:42 | gr-demand-backup/9-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-2.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:33:44 | gr-demand-backup/9-delete-data | ++ :
logger.go:42: 19:33:44 | gr-demand-backup/9-delete-data | + data=
logger.go:42: 19:33:44 | gr-demand-backup/9-delete-data | + kubectl create configmap -n kuttl-test-alive-sponge 08-delete-data-s3-2 --from-literal=data=
logger.go:42: 19:33:44 | gr-demand-backup/9-delete-data | configmap/08-delete-data-s3-2 created
logger.go:42: 19:33:45 | gr-demand-backup/9-delete-data | test step completed 9-delete-data
logger.go:42: 19:33:45 | gr-demand-backup/10-restore-from-s3 | starting test step 10-restore-from-s3
logger.go:42: 19:33:46 | gr-demand-backup/10-restore-from-s3 | PerconaServerMySQLRestore:kuttl-test-alive-sponge/gr-demand-backup-restore-s3 created
logger.go:42: 19:38:52 | gr-demand-backup/10-restore-from-s3 | test step completed 10-restore-from-s3
logger.go:42: 19:38:52 | gr-demand-backup/11-read-data | starting test step 11-read-data
logger.go:42: 19:38:52 | gr-demand-backup/11-read-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions password=$(get_user_pass root) cluster_name=$(get_cluster_name) for i in 0 1 2; do data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql -uroot -p'$password'") kubectl create configmap -n "${NAMESPACE}" 06-read-data-s3-${i} --from-literal=data="${data}" done]
logger.go:42: 19:38:52 | gr-demand-backup/11-read-data | + source ../../functions
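The 06-read-data-s3-* ConfigMaps written by this step only matter because the step's assert file compares them against expected objects; kuttl passes the step once the live ConfigMap matches. The assert files themselves are not part of this log, but the check they perform reduces to a one-liner like this sketch:

# Sketch only: after the S3 restore, the row must be back on each pod.
test "$(kubectl -n kuttl-test-alive-sponge get configmap 06-read-data-s3-0 \
    -o 'jsonpath={.data.data}')" = "100500" && echo "restore verified on mysql-0"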
logger.go:42: 19:38:54 | gr-demand-backup/11-read-data | + cluster_name=gr-demand-backup
logger.go:42: 19:38:54 | gr-demand-backup/11-read-data | + for i in 0 1 2
logger.go:42: 19:38:54 | gr-demand-backup/11-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-0.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:38:56 | gr-demand-backup/11-read-data | + data=100500
logger.go:42: 19:38:56 | gr-demand-backup/11-read-data | + kubectl create configmap -n kuttl-test-alive-sponge 06-read-data-s3-0 --from-literal=data=100500
logger.go:42: 19:38:57 | gr-demand-backup/11-read-data | configmap/06-read-data-s3-0 created
logger.go:42: 19:38:57 | gr-demand-backup/11-read-data | + for i in 0 1 2
logger.go:42: 19:38:57 | gr-demand-backup/11-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-1.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:38:59 | gr-demand-backup/11-read-data | + data=100500
logger.go:42: 19:38:59 | gr-demand-backup/11-read-data | + kubectl create configmap -n kuttl-test-alive-sponge 06-read-data-s3-1 --from-literal=data=100500
logger.go:42: 19:38:59 | gr-demand-backup/11-read-data | configmap/06-read-data-s3-1 created
logger.go:42: 19:38:59 | gr-demand-backup/11-read-data | + for i in 0 1 2
logger.go:42: 19:38:59 | gr-demand-backup/11-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-2.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:39:01 | gr-demand-backup/11-read-data | + data=100500 logger.go:42: 19:39:01 | gr-demand-backup/11-read-data | + kubectl create configmap -n kuttl-test-alive-sponge 06-read-data-s3-2 --from-literal=data=100500 logger.go:42: 19:39:01 | gr-demand-backup/11-read-data | configmap/06-read-data-s3-2 created logger.go:42: 19:39:03 | gr-demand-backup/11-read-data | test step completed 11-read-data logger.go:42: 19:39:03 | gr-demand-backup/12-create-backup-gcp | starting test step 12-create-backup-gcp logger.go:42: 19:39:03 | gr-demand-backup/12-create-backup-gcp | PerconaServerMySQLBackup:kuttl-test-alive-sponge/gr-demand-backup-gcp created logger.go:42: 19:39:15 | gr-demand-backup/12-create-backup-gcp | test step completed 12-create-backup-gcp logger.go:42: 19:39:15 | gr-demand-backup/13-delete-data | starting test step 13-delete-data logger.go:42: 19:39:15 | gr-demand-backup/13-delete-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions password=$(get_user_pass root) run_mysql \ "TRUNCATE TABLE myDB.myTable" \ "-h $(get_mysql_router_service $(get_cluster_name)) -uroot -p'$password'" cluster_name=$(get_cluster_name) for i in 0 1 2; do data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql -uroot -p'$password'") kubectl create configmap -n "${NAMESPACE}" 12-delete-data-gcp-${i} --from-literal=data="${data}" done] logger.go:42: 19:39:15 | gr-demand-backup/13-delete-data | + source ../../functions
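Editor's note: the password lookup in the next records comes from the get_user_pass helper in e2e-tests/functions. Reconstructed from the xtrace (a sketch, not the canonical source; NAMESPACE is the kuttl test namespace, here kuttl-test-alive-sponge), it is roughly:

    get_user_pass() {
        local user="$1"
        # read the per-user password from the test-secrets Secret and strip the base64 encoding
        kubectl -n "${NAMESPACE}" get secret test-secrets -o "jsonpath={.data.${user}}" | base64 --decode
    }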
logger.go:42: 19:39:16 | gr-demand-backup/13-delete-data | ++ get_user_pass root logger.go:42: 19:39:16 | gr-demand-backup/13-delete-data | ++ local user=root logger.go:42: 19:39:16 | gr-demand-backup/13-delete-data | ++ kubectl -n kuttl-test-alive-sponge get secret test-secrets -o 'jsonpath={.data.root}' logger.go:42: 19:39:16 | gr-demand-backup/13-delete-data | ++ base64 --decode logger.go:42: 19:39:16 | gr-demand-backup/13-delete-data | + password='pdqC>*{Fiq2R)mzJ,d' logger.go:42: 19:39:16 | gr-demand-backup/13-delete-data | +++ get_cluster_name logger.go:42: 19:39:16 | gr-demand-backup/13-delete-data | +++ kubectl -n kuttl-test-alive-sponge get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 19:39:17 | gr-demand-backup/13-delete-data | ++ get_mysql_router_service gr-demand-backup logger.go:42: 19:39:17 | gr-demand-backup/13-delete-data | ++ local cluster=gr-demand-backup logger.go:42: 19:39:17 | gr-demand-backup/13-delete-data | ++ echo gr-demand-backup-router logger.go:42: 19:39:17 | gr-demand-backup/13-delete-data | + run_mysql 'TRUNCATE TABLE myDB.myTable' '-h gr-demand-backup-router -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:39:17 | gr-demand-backup/13-delete-data | + local 'command=TRUNCATE TABLE myDB.myTable' logger.go:42: 19:39:17 | gr-demand-backup/13-delete-data | + local 'uri=-h gr-demand-backup-router -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:39:17 | gr-demand-backup/13-delete-data | + local pod= logger.go:42: 19:39:17 | gr-demand-backup/13-delete-data | ++ get_client_pod logger.go:42: 19:39:17 | gr-demand-backup/13-delete-data | ++ kubectl -n kuttl-test-alive-sponge get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:39:17 | gr-demand-backup/13-delete-data | + client_pod=mysql-client logger.go:42: 19:39:17 | gr-demand-backup/13-delete-data | + wait_pod mysql-client logger.go:42: 19:39:17 | gr-demand-backup/13-delete-data | + local pod=mysql-client logger.go:42: 19:39:17 | gr-demand-backup/13-delete-data | + set +o xtrace logger.go:42: 19:39:18 | gr-demand-backup/13-delete-data | mysql-clienttrue logger.go:42: 19:39:18 | gr-demand-backup/13-delete-data | + kubectl -n kuttl-test-alive-sponge exec mysql-client -- bash -c 'printf '\''%s\n'\'' "TRUNCATE TABLE myDB.myTable" | mysql -sN -h gr-demand-backup-router -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:39:18 | gr-demand-backup/13-delete-data | + sed -e 's/mysql: //' logger.go:42: 19:39:18 | gr-demand-backup/13-delete-data | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:39:19 | gr-demand-backup/13-delete-data | + : logger.go:42: 19:39:19 | gr-demand-backup/13-delete-data | ++ get_cluster_name logger.go:42: 19:39:19 | gr-demand-backup/13-delete-data | ++ kubectl -n kuttl-test-alive-sponge get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 19:39:19 | gr-demand-backup/13-delete-data | + cluster_name=gr-demand-backup logger.go:42: 19:39:19 | gr-demand-backup/13-delete-data | + for i in 0 1 2 logger.go:42: 19:39:19 | gr-demand-backup/13-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-0.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:39:19 | gr-demand-backup/13-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 19:39:19 | gr-demand-backup/13-delete-data | ++ local 'uri=-h gr-demand-backup-mysql-0.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:39:19 | gr-demand-backup/13-delete-data | ++ local pod= logger.go:42: 19:39:19 | gr-demand-backup/13-delete-data | +++ get_client_pod logger.go:42: 19:39:19 | gr-demand-backup/13-delete-data | +++ kubectl -n kuttl-test-alive-sponge get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:39:20 | gr-demand-backup/13-delete-data | ++ client_pod=mysql-client logger.go:42: 19:39:20 | gr-demand-backup/13-delete-data | ++ wait_pod mysql-client logger.go:42: 19:39:20 | gr-demand-backup/13-delete-data | ++ local pod=mysql-client logger.go:42: 19:39:20 | gr-demand-backup/13-delete-data | ++ set +o xtrace logger.go:42: 19:39:20 | gr-demand-backup/13-delete-data | mysql-clienttrue logger.go:42: 19:39:20 | gr-demand-backup/13-delete-data | ++ kubectl -n kuttl-test-alive-sponge exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-demand-backup-mysql-0.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:39:20 | gr-demand-backup/13-delete-data | ++ sed -e 's/mysql: //' logger.go:42: 19:39:20 | gr-demand-backup/13-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 19:39:21 | gr-demand-backup/13-delete-data | ++ : logger.go:42: 19:39:21 | gr-demand-backup/13-delete-data | + data= logger.go:42: 19:39:21 | gr-demand-backup/13-delete-data | + kubectl create configmap -n kuttl-test-alive-sponge 12-delete-data-gcp-0 --from-literal=data= logger.go:42: 19:39:22 | gr-demand-backup/13-delete-data | configmap/12-delete-data-gcp-0 created logger.go:42: 19:39:22 | gr-demand-backup/13-delete-data | + for i in 0 1 2 logger.go:42: 19:39:22 | gr-demand-backup/13-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-1.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:39:22 | gr-demand-backup/13-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 19:39:22 | gr-demand-backup/13-delete-data | ++ local 'uri=-h gr-demand-backup-mysql-1.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:39:22 | gr-demand-backup/13-delete-data | ++ local pod= logger.go:42: 19:39:22 | gr-demand-backup/13-delete-data | +++ get_client_pod logger.go:42: 19:39:22 | gr-demand-backup/13-delete-data | +++ kubectl -n kuttl-test-alive-sponge get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:39:22 | gr-demand-backup/13-delete-data | ++ client_pod=mysql-client logger.go:42: 19:39:22 | gr-demand-backup/13-delete-data | ++ wait_pod mysql-client logger.go:42: 19:39:22 | gr-demand-backup/13-delete-data | ++ local pod=mysql-client logger.go:42: 19:39:22 | gr-demand-backup/13-delete-data | ++ set +o xtrace logger.go:42: 19:39:23 | gr-demand-backup/13-delete-data | mysql-clienttrue logger.go:42: 19:39:23 | gr-demand-backup/13-delete-data | ++ kubectl -n kuttl-test-alive-sponge exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-demand-backup-mysql-1.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:39:23 | gr-demand-backup/13-delete-data | ++ sed -e 's/mysql: //' logger.go:42: 19:39:23 | gr-demand-backup/13-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 19:39:24 | gr-demand-backup/13-delete-data | ++ : logger.go:42: 19:39:24 | gr-demand-backup/13-delete-data | + data= logger.go:42: 19:39:24 | gr-demand-backup/13-delete-data | + kubectl create configmap -n kuttl-test-alive-sponge 12-delete-data-gcp-1 --from-literal=data= logger.go:42: 19:39:25 | gr-demand-backup/13-delete-data | configmap/12-delete-data-gcp-1 created logger.go:42: 19:39:25 | gr-demand-backup/13-delete-data | + for i in 0 1 2 logger.go:42: 19:39:25 | gr-demand-backup/13-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-2.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:39:25 | gr-demand-backup/13-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 19:39:25 | gr-demand-backup/13-delete-data | ++ local 'uri=-h gr-demand-backup-mysql-2.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:39:25 | gr-demand-backup/13-delete-data | ++ local pod= logger.go:42: 19:39:25 | gr-demand-backup/13-delete-data | +++ get_client_pod logger.go:42: 19:39:25 | gr-demand-backup/13-delete-data | +++ kubectl -n kuttl-test-alive-sponge get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:39:25 | gr-demand-backup/13-delete-data | ++ client_pod=mysql-client logger.go:42: 19:39:25 | gr-demand-backup/13-delete-data | ++ wait_pod mysql-client logger.go:42: 19:39:25 | gr-demand-backup/13-delete-data | ++ local pod=mysql-client logger.go:42: 19:39:25 | gr-demand-backup/13-delete-data | ++ set +o xtrace logger.go:42: 19:39:25 | gr-demand-backup/13-delete-data | mysql-clienttrue logger.go:42: 19:39:25 | gr-demand-backup/13-delete-data | ++ kubectl -n kuttl-test-alive-sponge exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-demand-backup-mysql-2.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:39:25 | gr-demand-backup/13-delete-data | ++ sed -e 's/mysql: //' logger.go:42: 19:39:25 | gr-demand-backup/13-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 19:39:26 | gr-demand-backup/13-delete-data | ++ : logger.go:42: 19:39:26 | gr-demand-backup/13-delete-data | + data= logger.go:42: 19:39:26 | gr-demand-backup/13-delete-data | + kubectl create configmap -n kuttl-test-alive-sponge 12-delete-data-gcp-2 --from-literal=data= logger.go:42: 19:39:27 | gr-demand-backup/13-delete-data | configmap/12-delete-data-gcp-2 created logger.go:42: 19:39:28 | gr-demand-backup/13-delete-data | test step completed 13-delete-data logger.go:42: 19:39:28 | gr-demand-backup/14-restore-from-gcp | starting test step 14-restore-from-gcp logger.go:42: 19:39:29 | gr-demand-backup/14-restore-from-gcp | PerconaServerMySQLRestore:kuttl-test-alive-sponge/gr-demand-backup-restore-gcp created logger.go:42: 19:44:29 | gr-demand-backup/14-restore-from-gcp | test step completed 14-restore-from-gcp logger.go:42: 19:44:29 | gr-demand-backup/15-read-data | starting test step 15-read-data logger.go:42: 19:44:29 | gr-demand-backup/15-read-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions password=$(get_user_pass root) cluster_name=$(get_cluster_name) for i in 0 1 2; do data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql -uroot -p'$password'") kubectl create configmap -n "${NAMESPACE}" 06-read-data-gcp-${i} --from-literal=data="${data}" done] logger.go:42: 19:44:29 | gr-demand-backup/15-read-data | + source ../../functions
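Editor's note: every read and write in these steps goes through the run_mysql helper. From the xtrace it behaves roughly as below (a sketch reconstructed from the trace, not the canonical source in e2e-tests/functions; the trailing "+ :" records after empty result sets suggest an "|| :" guard so errexit survives grep matching nothing):

    run_mysql() {
        local command="$1"
        local uri="$2"
        local pod
        pod=$(get_client_pod)    # jsonpath={.items[].metadata.name} over --selector=name=mysql-client
        wait_pod "${pod}"
        # pipe the statement into the mysql client inside the pod; -sN drops headers and table formatting
        kubectl -n "${NAMESPACE}" exec "${pod}" -- bash -c "printf '%s\n' \"${command}\" | mysql -sN ${uri}" \
            | sed -e 's/mysql: //' \
            | grep -v 'Using a password on the command line interface can be insecure.' \
            || :
    }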
logger.go:42: 19:44:30 | gr-demand-backup/15-read-data | ++ get_user_pass root logger.go:42: 19:44:30 | gr-demand-backup/15-read-data | ++ local user=root logger.go:42: 19:44:30 | gr-demand-backup/15-read-data | ++ kubectl -n kuttl-test-alive-sponge get secret test-secrets -o 'jsonpath={.data.root}' logger.go:42: 19:44:30 | gr-demand-backup/15-read-data | ++ base64 --decode logger.go:42: 19:44:30 | gr-demand-backup/15-read-data | + password='pdqC>*{Fiq2R)mzJ,d' logger.go:42: 19:44:30 | gr-demand-backup/15-read-data | ++ get_cluster_name logger.go:42: 19:44:30 | gr-demand-backup/15-read-data | ++ kubectl -n kuttl-test-alive-sponge get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 19:44:31 | gr-demand-backup/15-read-data | + cluster_name=gr-demand-backup logger.go:42: 19:44:31 | gr-demand-backup/15-read-data | + for i in 0 1 2 logger.go:42: 19:44:31 | gr-demand-backup/15-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-0.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:44:31 | gr-demand-backup/15-read-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 19:44:31 | gr-demand-backup/15-read-data | ++ local 'uri=-h gr-demand-backup-mysql-0.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:44:31 | gr-demand-backup/15-read-data | ++ local pod= logger.go:42: 19:44:31 | gr-demand-backup/15-read-data | +++ get_client_pod logger.go:42: 19:44:31 | gr-demand-backup/15-read-data | +++ kubectl -n kuttl-test-alive-sponge get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:44:32 | gr-demand-backup/15-read-data | ++ client_pod=mysql-client logger.go:42: 19:44:32 | gr-demand-backup/15-read-data | ++ wait_pod mysql-client logger.go:42: 19:44:32 | gr-demand-backup/15-read-data | ++ local pod=mysql-client logger.go:42: 19:44:32 | gr-demand-backup/15-read-data | ++ set +o xtrace logger.go:42: 19:44:32 | gr-demand-backup/15-read-data | mysql-clienttrue logger.go:42: 19:44:32 | gr-demand-backup/15-read-data | ++ kubectl -n kuttl-test-alive-sponge exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-demand-backup-mysql-0.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:44:32 | gr-demand-backup/15-read-data | ++ sed -e 's/mysql: //' logger.go:42: 19:44:32 | gr-demand-backup/15-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:44:33 | gr-demand-backup/15-read-data | + data=100500 logger.go:42: 19:44:33 | gr-demand-backup/15-read-data | + kubectl create configmap -n kuttl-test-alive-sponge 06-read-data-gcp-0 --from-literal=data=100500 logger.go:42: 19:44:33 | gr-demand-backup/15-read-data | configmap/06-read-data-gcp-0 created logger.go:42: 19:44:33 | gr-demand-backup/15-read-data | + for i in 0 1 2 logger.go:42: 19:44:33 | gr-demand-backup/15-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-1.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:44:33 | gr-demand-backup/15-read-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 19:44:33 | gr-demand-backup/15-read-data | ++ local 'uri=-h gr-demand-backup-mysql-1.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:44:33 | gr-demand-backup/15-read-data | ++ local pod= logger.go:42: 19:44:33 | gr-demand-backup/15-read-data | +++ get_client_pod logger.go:42: 19:44:33 | gr-demand-backup/15-read-data | +++ kubectl -n kuttl-test-alive-sponge get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:44:34 | gr-demand-backup/15-read-data | ++ client_pod=mysql-client logger.go:42: 19:44:34 | gr-demand-backup/15-read-data | ++ wait_pod mysql-client logger.go:42: 19:44:34 | gr-demand-backup/15-read-data | ++ local pod=mysql-client logger.go:42: 19:44:34 | gr-demand-backup/15-read-data | ++ set +o xtrace logger.go:42: 19:44:34 | gr-demand-backup/15-read-data | mysql-clienttrue logger.go:42: 19:44:34 | gr-demand-backup/15-read-data | ++ kubectl -n kuttl-test-alive-sponge exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-demand-backup-mysql-1.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:44:34 | gr-demand-backup/15-read-data | ++ sed -e 's/mysql: //' logger.go:42: 19:44:34 | gr-demand-backup/15-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 19:44:35 | gr-demand-backup/15-read-data | + data=100500 logger.go:42: 19:44:35 | gr-demand-backup/15-read-data | + kubectl create configmap -n kuttl-test-alive-sponge 06-read-data-gcp-1 --from-literal=data=100500 logger.go:42: 19:44:36 | gr-demand-backup/15-read-data | configmap/06-read-data-gcp-1 created logger.go:42: 19:44:36 | gr-demand-backup/15-read-data | + for i in 0 1 2 logger.go:42: 19:44:36 | gr-demand-backup/15-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-2.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:44:36 | gr-demand-backup/15-read-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 19:44:36 | gr-demand-backup/15-read-data | ++ local 'uri=-h gr-demand-backup-mysql-2.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:44:36 | gr-demand-backup/15-read-data | ++ local pod= logger.go:42: 19:44:36 | gr-demand-backup/15-read-data | +++ get_client_pod logger.go:42: 19:44:36 | gr-demand-backup/15-read-data | +++ kubectl -n kuttl-test-alive-sponge get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:44:36 | gr-demand-backup/15-read-data | ++ client_pod=mysql-client logger.go:42: 19:44:36 | gr-demand-backup/15-read-data | ++ wait_pod mysql-client logger.go:42: 19:44:36 | gr-demand-backup/15-read-data | ++ local pod=mysql-client logger.go:42: 19:44:36 | gr-demand-backup/15-read-data | ++ set +o xtrace logger.go:42: 19:44:37 | gr-demand-backup/15-read-data | mysql-clienttrue logger.go:42: 19:44:37 | gr-demand-backup/15-read-data | ++ kubectl -n kuttl-test-alive-sponge exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-demand-backup-mysql-2.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:44:37 | gr-demand-backup/15-read-data | ++ sed -e 's/mysql: //' logger.go:42: 19:44:37 | gr-demand-backup/15-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 19:44:38 | gr-demand-backup/15-read-data | + data=100500 logger.go:42: 19:44:38 | gr-demand-backup/15-read-data | + kubectl create configmap -n kuttl-test-alive-sponge 06-read-data-gcp-2 --from-literal=data=100500 logger.go:42: 19:44:38 | gr-demand-backup/15-read-data | configmap/06-read-data-gcp-2 created logger.go:42: 19:44:39 | gr-demand-backup/15-read-data | test step completed 15-read-data logger.go:42: 19:44:39 | gr-demand-backup/16-create-backup-azure | starting test step 16-create-backup-azure logger.go:42: 19:44:40 | gr-demand-backup/16-create-backup-azure | PerconaServerMySQLBackup:kuttl-test-alive-sponge/gr-demand-backup-azure created logger.go:42: 19:44:52 | gr-demand-backup/16-create-backup-azure | test step completed 16-create-backup-azure logger.go:42: 19:44:52 | gr-demand-backup/17-delete-data | starting test step 17-delete-data logger.go:42: 19:44:52 | gr-demand-backup/17-delete-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions password=$(get_user_pass root) run_mysql \ "TRUNCATE TABLE myDB.myTable" \ "-h $(get_mysql_router_service $(get_cluster_name)) -uroot -p'$password'" cluster_name=$(get_cluster_name) for i in 0 1 2; do data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql -uroot -p'$password'") kubectl create configmap -n "${NAMESPACE}" 16-delete-data-azure-${i} --from-literal=data="${data}" done] logger.go:42: 19:44:52 | gr-demand-backup/17-delete-data | + source ../../functions
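Editor's note: the TRUNCATE in this step is deliberately issued through the MySQL Router Service rather than a specific pod. Per the xtrace, get_mysql_router_service is just a naming convention (sketch of the helper as traced):

    get_mysql_router_service() {
        local cluster="$1"
        # the operator exposes Group Replication clusters through a <cluster>-router Service
        echo "${cluster}-router"
    }

Routing the write through gr-demand-backup-router exercises the same entry point an application would use, while the per-pod SELECTs that follow check that the truncation (and later the restore) is visible on every Group Replication member.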
logger.go:42: 19:44:52 | gr-demand-backup/17-delete-data | ++ get_user_pass root logger.go:42: 19:44:52 | gr-demand-backup/17-delete-data | ++ local user=root logger.go:42: 19:44:52 | gr-demand-backup/17-delete-data | ++ kubectl -n kuttl-test-alive-sponge get secret test-secrets -o 'jsonpath={.data.root}' logger.go:42: 19:44:52 | gr-demand-backup/17-delete-data | ++ base64 --decode logger.go:42: 19:44:53 | gr-demand-backup/17-delete-data | + password='pdqC>*{Fiq2R)mzJ,d' logger.go:42: 19:44:53 | gr-demand-backup/17-delete-data | +++ get_cluster_name logger.go:42: 19:44:53 | gr-demand-backup/17-delete-data | +++ kubectl -n kuttl-test-alive-sponge get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 19:44:54 | gr-demand-backup/17-delete-data | ++ get_mysql_router_service gr-demand-backup logger.go:42: 19:44:54 | gr-demand-backup/17-delete-data | ++ local cluster=gr-demand-backup logger.go:42: 19:44:54 | gr-demand-backup/17-delete-data | ++ echo gr-demand-backup-router logger.go:42: 19:44:54 | gr-demand-backup/17-delete-data | + run_mysql 'TRUNCATE TABLE myDB.myTable' '-h gr-demand-backup-router -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:44:54 | gr-demand-backup/17-delete-data | + local 'command=TRUNCATE TABLE myDB.myTable' logger.go:42: 19:44:54 | gr-demand-backup/17-delete-data | + local 'uri=-h gr-demand-backup-router -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:44:54 | gr-demand-backup/17-delete-data | + local pod= logger.go:42: 19:44:54 | gr-demand-backup/17-delete-data | ++ get_client_pod logger.go:42: 19:44:54 | gr-demand-backup/17-delete-data | ++ kubectl -n kuttl-test-alive-sponge get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:44:54 | gr-demand-backup/17-delete-data | + client_pod=mysql-client logger.go:42: 19:44:54 | gr-demand-backup/17-delete-data | + wait_pod mysql-client logger.go:42: 19:44:54 | gr-demand-backup/17-delete-data | + local pod=mysql-client logger.go:42: 19:44:54 | gr-demand-backup/17-delete-data | + set +o xtrace logger.go:42: 19:44:55 | gr-demand-backup/17-delete-data | mysql-clienttrue logger.go:42: 19:44:55 | gr-demand-backup/17-delete-data | + kubectl -n kuttl-test-alive-sponge exec mysql-client -- bash -c 'printf '\''%s\n'\'' "TRUNCATE TABLE myDB.myTable" | mysql -sN -h gr-demand-backup-router -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:44:55 | gr-demand-backup/17-delete-data | + sed -e 's/mysql: //' logger.go:42: 19:44:55 | gr-demand-backup/17-delete-data | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:44:56 | gr-demand-backup/17-delete-data | + : logger.go:42: 19:44:56 | gr-demand-backup/17-delete-data | ++ get_cluster_name logger.go:42: 19:44:56 | gr-demand-backup/17-delete-data | ++ kubectl -n kuttl-test-alive-sponge get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 19:44:56 | gr-demand-backup/17-delete-data | + cluster_name=gr-demand-backup logger.go:42: 19:44:56 | gr-demand-backup/17-delete-data | + for i in 0 1 2 logger.go:42: 19:44:56 | gr-demand-backup/17-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-0.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:44:56 | gr-demand-backup/17-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 19:44:56 | gr-demand-backup/17-delete-data | ++ local 'uri=-h gr-demand-backup-mysql-0.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:44:56 | gr-demand-backup/17-delete-data | ++ local pod= logger.go:42: 19:44:56 | gr-demand-backup/17-delete-data | +++ get_client_pod logger.go:42: 19:44:56 | gr-demand-backup/17-delete-data | +++ kubectl -n kuttl-test-alive-sponge get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:44:57 | gr-demand-backup/17-delete-data | ++ client_pod=mysql-client logger.go:42: 19:44:57 | gr-demand-backup/17-delete-data | ++ wait_pod mysql-client logger.go:42: 19:44:57 | gr-demand-backup/17-delete-data | ++ local pod=mysql-client logger.go:42: 19:44:57 | gr-demand-backup/17-delete-data | ++ set +o xtrace logger.go:42: 19:44:57 | gr-demand-backup/17-delete-data | mysql-clienttrue logger.go:42: 19:44:57 | gr-demand-backup/17-delete-data | ++ kubectl -n kuttl-test-alive-sponge exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-demand-backup-mysql-0.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:44:57 | gr-demand-backup/17-delete-data | ++ sed -e 's/mysql: //' logger.go:42: 19:44:57 | gr-demand-backup/17-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 19:44:58 | gr-demand-backup/17-delete-data | ++ : logger.go:42: 19:44:58 | gr-demand-backup/17-delete-data | + data= logger.go:42: 19:44:58 | gr-demand-backup/17-delete-data | + kubectl create configmap -n kuttl-test-alive-sponge 16-delete-data-azure-0 --from-literal=data= logger.go:42: 19:44:59 | gr-demand-backup/17-delete-data | configmap/16-delete-data-azure-0 created logger.go:42: 19:44:59 | gr-demand-backup/17-delete-data | + for i in 0 1 2 logger.go:42: 19:44:59 | gr-demand-backup/17-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-1.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:44:59 | gr-demand-backup/17-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 19:44:59 | gr-demand-backup/17-delete-data | ++ local 'uri=-h gr-demand-backup-mysql-1.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:44:59 | gr-demand-backup/17-delete-data | ++ local pod= logger.go:42: 19:44:59 | gr-demand-backup/17-delete-data | +++ get_client_pod logger.go:42: 19:44:59 | gr-demand-backup/17-delete-data | +++ kubectl -n kuttl-test-alive-sponge get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:44:59 | gr-demand-backup/17-delete-data | ++ client_pod=mysql-client logger.go:42: 19:44:59 | gr-demand-backup/17-delete-data | ++ wait_pod mysql-client logger.go:42: 19:44:59 | gr-demand-backup/17-delete-data | ++ local pod=mysql-client logger.go:42: 19:44:59 | gr-demand-backup/17-delete-data | ++ set +o xtrace logger.go:42: 19:45:00 | gr-demand-backup/17-delete-data | mysql-clienttrue logger.go:42: 19:45:00 | gr-demand-backup/17-delete-data | ++ kubectl -n kuttl-test-alive-sponge exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-demand-backup-mysql-1.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:45:00 | gr-demand-backup/17-delete-data | ++ sed -e 's/mysql: //' logger.go:42: 19:45:00 | gr-demand-backup/17-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 19:45:01 | gr-demand-backup/17-delete-data | ++ : logger.go:42: 19:45:01 | gr-demand-backup/17-delete-data | + data= logger.go:42: 19:45:01 | gr-demand-backup/17-delete-data | + kubectl create configmap -n kuttl-test-alive-sponge 16-delete-data-azure-1 --from-literal=data= logger.go:42: 19:45:01 | gr-demand-backup/17-delete-data | configmap/16-delete-data-azure-1 created logger.go:42: 19:45:01 | gr-demand-backup/17-delete-data | + for i in 0 1 2 logger.go:42: 19:45:01 | gr-demand-backup/17-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-2.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:45:01 | gr-demand-backup/17-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 19:45:01 | gr-demand-backup/17-delete-data | ++ local 'uri=-h gr-demand-backup-mysql-2.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:45:01 | gr-demand-backup/17-delete-data | ++ local pod= logger.go:42: 19:45:01 | gr-demand-backup/17-delete-data | +++ get_client_pod logger.go:42: 19:45:01 | gr-demand-backup/17-delete-data | +++ kubectl -n kuttl-test-alive-sponge get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:45:02 | gr-demand-backup/17-delete-data | ++ client_pod=mysql-client logger.go:42: 19:45:02 | gr-demand-backup/17-delete-data | ++ wait_pod mysql-client logger.go:42: 19:45:02 | gr-demand-backup/17-delete-data | ++ local pod=mysql-client logger.go:42: 19:45:02 | gr-demand-backup/17-delete-data | ++ set +o xtrace logger.go:42: 19:45:02 | gr-demand-backup/17-delete-data | mysql-clienttrue logger.go:42: 19:45:02 | gr-demand-backup/17-delete-data | ++ kubectl -n kuttl-test-alive-sponge exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-demand-backup-mysql-2.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:45:02 | gr-demand-backup/17-delete-data | ++ sed -e 's/mysql: //' logger.go:42: 19:45:02 | gr-demand-backup/17-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 19:45:03 | gr-demand-backup/17-delete-data | ++ : logger.go:42: 19:45:03 | gr-demand-backup/17-delete-data | + data= logger.go:42: 19:45:03 | gr-demand-backup/17-delete-data | + kubectl create configmap -n kuttl-test-alive-sponge 16-delete-data-azure-2 --from-literal=data= logger.go:42: 19:45:03 | gr-demand-backup/17-delete-data | configmap/16-delete-data-azure-2 created logger.go:42: 19:45:05 | gr-demand-backup/17-delete-data | test step completed 17-delete-data logger.go:42: 19:45:05 | gr-demand-backup/18-restore-from-azure | starting test step 18-restore-from-azure logger.go:42: 19:45:05 | gr-demand-backup/18-restore-from-azure | PerconaServerMySQLRestore:kuttl-test-alive-sponge/gr-demand-backup-restore-azure created logger.go:42: 19:50:17 | gr-demand-backup/18-restore-from-azure | test step completed 18-restore-from-azure logger.go:42: 19:50:17 | gr-demand-backup/19-read-data | starting test step 19-read-data logger.go:42: 19:50:17 | gr-demand-backup/19-read-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions password=$(get_user_pass root) cluster_name=$(get_cluster_name) for i in 0 1 2; do data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql -uroot -p'$password'") kubectl create configmap -n "${NAMESPACE}" 06-read-data-azure-${i} --from-literal=data="${data}" done] logger.go:42: 19:50:17 | gr-demand-backup/19-read-data | + source ../../functions
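Editor's note: the 19-read-data step shown above repeats the same verification loop as steps 11 and 15, this time capturing results into 06-read-data-azure-* ConfigMaps. Annotated, the step script amounts to the following (the comments are editorial assumptions; "headless Service" refers to the <cluster>-mysql Service implied by the pod DNS names in the trace):

    password=$(get_user_pass root)
    cluster_name=$(get_cluster_name)    # first ps resource in the namespace, per the jsonpath in the trace
    for i in 0 1 2; do
        # address each Group Replication member directly via its pod DNS name under the headless Service
        host="${cluster_name}-mysql-${i}.${cluster_name}-mysql"
        data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${host} -uroot -p'${password}'")
        # store the result so the step's kuttl assert can compare it against the expected value (100500)
        kubectl create configmap -n "${NAMESPACE}" 06-read-data-azure-${i} --from-literal=data="${data}"
    done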
logger.go:42: 19:50:17 | gr-demand-backup/19-read-data | ++ get_user_pass root logger.go:42: 19:50:17 | gr-demand-backup/19-read-data | ++ local user=root logger.go:42: 19:50:17 | gr-demand-backup/19-read-data | ++ kubectl -n kuttl-test-alive-sponge get secret test-secrets -o 'jsonpath={.data.root}' logger.go:42: 19:50:17 | gr-demand-backup/19-read-data | ++ base64 --decode logger.go:42: 19:50:18 | gr-demand-backup/19-read-data | + password='pdqC>*{Fiq2R)mzJ,d' logger.go:42: 19:50:18 | gr-demand-backup/19-read-data | ++ get_cluster_name logger.go:42: 19:50:18 | gr-demand-backup/19-read-data | ++ kubectl -n kuttl-test-alive-sponge get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 19:50:18 | gr-demand-backup/19-read-data | + cluster_name=gr-demand-backup logger.go:42: 19:50:18 | gr-demand-backup/19-read-data | + for i in 0 1 2 logger.go:42: 19:50:18 | gr-demand-backup/19-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-0.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:50:18 | gr-demand-backup/19-read-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 19:50:18 | gr-demand-backup/19-read-data | ++ local 'uri=-h gr-demand-backup-mysql-0.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:50:18 | gr-demand-backup/19-read-data | ++ local pod= logger.go:42: 19:50:18 | gr-demand-backup/19-read-data | +++ get_client_pod logger.go:42: 19:50:18 | gr-demand-backup/19-read-data | +++ kubectl -n kuttl-test-alive-sponge get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:50:19 | gr-demand-backup/19-read-data | ++ client_pod=mysql-client logger.go:42: 19:50:19 | gr-demand-backup/19-read-data | ++ wait_pod mysql-client logger.go:42: 19:50:19 | gr-demand-backup/19-read-data | ++ local pod=mysql-client logger.go:42: 19:50:19 | gr-demand-backup/19-read-data | ++ set +o xtrace logger.go:42: 19:50:19 | gr-demand-backup/19-read-data | mysql-clienttrue logger.go:42: 19:50:19 | gr-demand-backup/19-read-data | ++ kubectl -n kuttl-test-alive-sponge exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-demand-backup-mysql-0.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\''' logger.go:42: 19:50:19 | gr-demand-backup/19-read-data | ++ sed -e 's/mysql: //' logger.go:42: 19:50:19 | gr-demand-backup/19-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:50:20 | gr-demand-backup/19-read-data | + data=100500
logger.go:42: 19:50:20 | gr-demand-backup/19-read-data | + kubectl create configmap -n kuttl-test-alive-sponge 06-read-data-azure-0 --from-literal=data=100500
logger.go:42: 19:50:21 | gr-demand-backup/19-read-data | configmap/06-read-data-azure-0 created
logger.go:42: 19:50:21 | gr-demand-backup/19-read-data | + for i in 0 1 2
logger.go:42: 19:50:21 | gr-demand-backup/19-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-1.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:50:21 | gr-demand-backup/19-read-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 19:50:21 | gr-demand-backup/19-read-data | ++ local 'uri=-h gr-demand-backup-mysql-1.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:50:21 | gr-demand-backup/19-read-data | ++ local pod=
logger.go:42: 19:50:21 | gr-demand-backup/19-read-data | +++ get_client_pod
logger.go:42: 19:50:21 | gr-demand-backup/19-read-data | +++ kubectl -n kuttl-test-alive-sponge get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:50:21 | gr-demand-backup/19-read-data | ++ client_pod=mysql-client
logger.go:42: 19:50:21 | gr-demand-backup/19-read-data | ++ wait_pod mysql-client
logger.go:42: 19:50:21 | gr-demand-backup/19-read-data | ++ local pod=mysql-client
logger.go:42: 19:50:21 | gr-demand-backup/19-read-data | ++ set +o xtrace
logger.go:42: 19:50:21 | gr-demand-backup/19-read-data | mysql-clienttrue
logger.go:42: 19:50:21 | gr-demand-backup/19-read-data | ++ kubectl -n kuttl-test-alive-sponge exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-demand-backup-mysql-1.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:50:21 | gr-demand-backup/19-read-data | ++ sed -e 's/mysql: //'
logger.go:42: 19:50:21 | gr-demand-backup/19-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:50:22 | gr-demand-backup/19-read-data | + data=100500
logger.go:42: 19:50:22 | gr-demand-backup/19-read-data | + kubectl create configmap -n kuttl-test-alive-sponge 06-read-data-azure-1 --from-literal=data=100500
logger.go:42: 19:50:23 | gr-demand-backup/19-read-data | configmap/06-read-data-azure-1 created
logger.go:42: 19:50:23 | gr-demand-backup/19-read-data | + for i in 0 1 2
logger.go:42: 19:50:23 | gr-demand-backup/19-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h gr-demand-backup-mysql-2.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:50:23 | gr-demand-backup/19-read-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 19:50:23 | gr-demand-backup/19-read-data | ++ local 'uri=-h gr-demand-backup-mysql-2.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:50:23 | gr-demand-backup/19-read-data | ++ local pod=
logger.go:42: 19:50:23 | gr-demand-backup/19-read-data | +++ get_client_pod
logger.go:42: 19:50:23 | gr-demand-backup/19-read-data | +++ kubectl -n kuttl-test-alive-sponge get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:50:24 | gr-demand-backup/19-read-data | ++ client_pod=mysql-client
logger.go:42: 19:50:24 | gr-demand-backup/19-read-data | ++ wait_pod mysql-client
logger.go:42: 19:50:24 | gr-demand-backup/19-read-data | ++ local pod=mysql-client
logger.go:42: 19:50:24 | gr-demand-backup/19-read-data | ++ set +o xtrace
logger.go:42: 19:50:24 | gr-demand-backup/19-read-data | mysql-clienttrue
logger.go:42: 19:50:24 | gr-demand-backup/19-read-data | ++ kubectl -n kuttl-test-alive-sponge exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h gr-demand-backup-mysql-2.gr-demand-backup-mysql -uroot -p'\''pdqC>*{Fiq2R)mzJ,d'\'''
logger.go:42: 19:50:24 | gr-demand-backup/19-read-data | ++ sed -e 's/mysql: //'
logger.go:42: 19:50:24 | gr-demand-backup/19-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:50:25 | gr-demand-backup/19-read-data | + data=100500
logger.go:42: 19:50:25 | gr-demand-backup/19-read-data | + kubectl create configmap -n kuttl-test-alive-sponge 06-read-data-azure-2 --from-literal=data=100500
logger.go:42: 19:50:25 | gr-demand-backup/19-read-data | configmap/06-read-data-azure-2 created
logger.go:42: 19:50:27 | gr-demand-backup/19-read-data | test step completed 19-read-data
logger.go:42: 19:50:27 | gr-demand-backup/98-drop-finalizer | starting test step 98-drop-finalizer
logger.go:42: 19:50:27 | gr-demand-backup/98-drop-finalizer | PerconaServerMySQL:kuttl-test-alive-sponge/gr-demand-backup updated
logger.go:42: 19:50:27 | gr-demand-backup/98-drop-finalizer | test step completed 98-drop-finalizer
logger.go:42: 19:50:27 | gr-demand-backup/99-remove-cluster-gracefully | starting test step 99-remove-cluster-gracefully
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_operator]
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | + source ../../functions
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ realpath ../../..
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-986
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | ++++ pwd
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/tests/gr-demand-backup
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | ++ test_name=gr-demand-backup
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/vars.sh
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-986
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-986
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/deploy
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/deploy
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/conf
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-986/e2e-tests/conf
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ export TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ TEMP_DIR=/tmp/kuttl/ps/gr-demand-backup
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ export GIT_BRANCH=PR-986
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ GIT_BRANCH=PR-986
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ export VERSION=PR-986-fb6e2fa0
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ VERSION=PR-986-fb6e2fa0
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ export MINIO_VER=5.4.0
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ MINIO_VER=5.4.0
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ export VAULT_VER=0.16.1
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ VAULT_VER=0.16.1
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | ++++ which gdate
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-986/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | ++++ which date
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ date=/usr/sbin/date
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ oc get projects
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ :
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ kubectl get nodes
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | +++ grep '^minikube'
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | ++ oc get projects
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | + destroy_operator
logger.go:42: 19:50:28 | gr-demand-backup/99-remove-cluster-gracefully | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
logger.go:42: 19:50:29 | gr-demand-backup/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 19:50:29 | gr-demand-backup/99-remove-cluster-gracefully | deployment.apps "percona-server-mysql-operator" force deleted
logger.go:42: 19:50:29 | gr-demand-backup/99-remove-cluster-gracefully | + [[ -n ps-operator ]]
logger.go:42: 19:50:29 | gr-demand-backup/99-remove-cluster-gracefully | + kubectl delete namespace ps-operator --force --grace-period=0
logger.go:42: 19:50:29 | gr-demand-backup/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 19:50:30 | gr-demand-backup/99-remove-cluster-gracefully | namespace "ps-operator" force deleted
logger.go:42: 19:50:37 | gr-demand-backup/99-remove-cluster-gracefully | test step completed 99-remove-cluster-gracefully
logger.go:42: 19:50:37 | gr-demand-backup | gr-demand-backup events from ns kuttl-test-alive-sponge:
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:21:41 +0000 UTC Normal Pod mysql-client Binding Scheduled Successfully assigned kuttl-test-alive-sponge/mysql-client to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-w8m4 default-scheduler
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:21:42 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Container image "perconalab/percona-server-mysql-operator:main-psmysql8.0" already present on machine kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:21:42 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container: mysql-client kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:21:42 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:21:47 +0000 UTC Normal ReplicaSet.apps minio-service-86dfccd949 SuccessfulCreate Created pod: minio-service-86dfccd949-ghwws replicaset-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:21:47 +0000 UTC Normal PersistentVolumeClaim minio-service WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:21:47 +0000 UTC Normal Deployment.apps minio-service ScalingReplicaSet Scaled up replica set minio-service-86dfccd949 to 1 deployment-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:21:47 +0000 UTC Normal PersistentVolumeClaim minio-service ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:21:47 +0000 UTC Normal PersistentVolumeClaim minio-service Provisioning External provisioner is provisioning volume for claim "kuttl-test-alive-sponge/minio-service" pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:21:48 +0000 UTC Normal Job.batch minio-service-post-job SuccessfulCreate Created pod: minio-service-post-job-4t29t job-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:21:49 +0000 UTC Normal Pod minio-service-post-job-4t29t Binding Scheduled Successfully assigned kuttl-test-alive-sponge/minio-service-post-job-4t29t to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-w8m4 default-scheduler
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:21:49 +0000 UTC Normal Pod minio-service-post-job-4t29t.spec.containers{minio-make-user} Pulling Pulling image "quay.io/minio/mc:RELEASE.2024-11-21T17-21-54Z" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:21:51 +0000 UTC Normal Pod minio-service-86dfccd949-ghwws Binding Scheduled Successfully assigned kuttl-test-alive-sponge/minio-service-86dfccd949-ghwws to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-w8m4 default-scheduler
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:21:51 +0000 UTC Normal Pod minio-service-post-job-4t29t.spec.containers{minio-make-user} Pulled Successfully pulled image "quay.io/minio/mc:RELEASE.2024-11-21T17-21-54Z" in 1.697s (1.697s including waiting). Image size: 28122288 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:21:51 +0000 UTC Normal Pod minio-service-post-job-4t29t.spec.containers{minio-make-user} Created Created container: minio-make-user kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:21:51 +0000 UTC Normal Pod minio-service-post-job-4t29t.spec.containers{minio-make-user} Started Started container minio-make-user kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:21:51 +0000 UTC Normal PersistentVolumeClaim minio-service ProvisioningSucceeded Successfully provisioned volume pvc-491ee7b2-c375-4fef-9bd9-7fbc597c3c9b pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:21:59 +0000 UTC Normal Pod minio-service-86dfccd949-ghwws SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-491ee7b2-c375-4fef-9bd9-7fbc597c3c9b" attachdetach-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:00 +0000 UTC Normal Pod minio-service-86dfccd949-ghwws.spec.containers{minio} Pulling Pulling image "quay.io/minio/minio:RELEASE.2024-12-18T13-15-44Z" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:02 +0000 UTC Normal Pod minio-service-86dfccd949-ghwws.spec.containers{minio} Pulled Successfully pulled image "quay.io/minio/minio:RELEASE.2024-12-18T13-15-44Z" in 2.469s (2.469s including waiting). Image size: 62642371 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:02 +0000 UTC Normal Pod minio-service-86dfccd949-ghwws.spec.containers{minio} Created Created container: minio kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:02 +0000 UTC Normal Pod minio-service-86dfccd949-ghwws.spec.containers{minio} Started Started container minio kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:14 +0000 UTC Normal Job.batch minio-service-post-job Completed Job completed job-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:16 +0000 UTC Normal Pod aws-cli Binding Scheduled Successfully assigned kuttl-test-alive-sponge/aws-cli to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-w8m4 default-scheduler
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:16 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Pulling Pulling image "perconalab/awscli" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:19 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Pulled Successfully pulled image "perconalab/awscli" in 2.343s (2.343s including waiting). Image size: 30314917 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:19 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Created Created container: aws-cli kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:19 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Started Started container aws-cli kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:27 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:27 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:27 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-alive-sponge/datadir-gr-demand-backup-mysql-0" pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:27 +0000 UTC Normal StatefulSet.apps gr-demand-backup-mysql SuccessfulCreate create Claim datadir-gr-demand-backup-mysql-0 Pod gr-demand-backup-mysql-0 in StatefulSet gr-demand-backup-mysql success statefulset-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:27 +0000 UTC Normal StatefulSet.apps gr-demand-backup-mysql SuccessfulCreate create Pod gr-demand-backup-mysql-0 in StatefulSet gr-demand-backup-mysql successful statefulset-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:27 +0000 UTC Warning PerconaServerMySQL.ps.percona.com gr-demand-backup ClusterStateChanged -> Initializing ps-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:31 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-6de198cc-51dd-4462-a5ca-d2c03aa7287f pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:31 +0000 UTC Normal Pod gr-demand-backup-mysql-0 Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-mysql-0 to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-9kgt default-scheduler
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:38 +0000 UTC Normal Pod gr-demand-backup-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-6de198cc-51dd-4462-a5ca-d2c03aa7287f" attachdetach-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:40 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:40 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 175ms (175ms including waiting). Image size: 108991485 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:40 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:40 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:42 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:58 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 15.857s (15.857s including waiting). Image size: 437177392 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:58 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:58 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:22:58 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:23:17 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 19.596s (19.597s including waiting). Image size: 428809836 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:23:17 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:23:17 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:23:37 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:23:37 +0000 UTC Normal StatefulSet.apps gr-demand-backup-mysql SuccessfulCreate create Claim datadir-gr-demand-backup-mysql-1 Pod gr-demand-backup-mysql-1 in StatefulSet gr-demand-backup-mysql success statefulset-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:23:40 +0000 UTC Normal StatefulSet.apps gr-demand-backup-mysql SuccessfulCreate create Pod gr-demand-backup-mysql-1 in StatefulSet gr-demand-backup-mysql successful statefulset-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:23:41 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:23:41 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-alive-sponge/datadir-gr-demand-backup-mysql-1" pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:23:45 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-7257be81-11d3-40f3-8ac6-0c5a0437da07 pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:23:46 +0000 UTC Normal Pod gr-demand-backup-mysql-1 Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-mysql-1 to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-w8m4 default-scheduler
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:23:53 +0000 UTC Normal Pod gr-demand-backup-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-7257be81-11d3-40f3-8ac6-0c5a0437da07" attachdetach-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:23:57 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:23:58 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 234ms (234ms including waiting). Image size: 108991485 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:23:58 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:23:58 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:24:00 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:24:00 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 188ms (188ms including waiting). Image size: 437177392 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:24:00 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:24:00 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:24:00 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:24:15 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 14.735s (14.735s including waiting). Image size: 428809836 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:24:15 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:24:15 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:24:30 +0000 UTC Warning Pod gr-demand-backup-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/08/13 19:24:17 Waiting for MySQL ready state 2025/08/13 19:24:17 MySQL is ready 2025/08/13 19:24:17 Starting bootstrap... 2025/08/13 19:24:18 mysql-shell version: 8.0.42 2025/08/13 19:24:18 Running dba.configureLocalInstance('operator:*****@gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge', {'clearReadOnly': true}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The clearReadOnly option is deprecated and will be removed in a future release. WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead. Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster... This instance reports its own address as gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 applierWorkerThreads will be set to the default value of 4. NOTE: Some configuration options need to be fixed: +----------------------------------------+---------------+----------------+----------------------------+ | Variable | Current Value | Required Value | Note |+----------------------------------------+---------------+----------------+----------------------------+ | binlog_transaction_dependency_tracking | COMMIT_ORDER | WRITESET | Update the server variable | +----------------------------------------+---------------+----------------+----------------------------+ Disabled super_read_only on the instance 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' Enabling super_read_only on the instance 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' Configuring instance... WARNING:*****@binlog_transaction_dependency_tracking' is deprecated and will be removed in a future release. (Code 1287). The instance 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' was configured to be used in an InnoDB cluster. 2025/08/13 19:24:18 Instance (gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge) configured to join to the InnoDB cluster 2025/08/13 19:24:18 peers: [gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge] 2025/08/13 19:24:18 Running dba.getCluster('grdemandbackup') Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. 2025/08/13 19:24:19 Connected to peer gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge 2025/08/13 19:24:19 Cluster status: ClusterName: grdemandbackup Status: OK_NO_TOLERANCE StatusText: Cluster is NOT tolerant to any failures. SSL: REQUIRED Primary: gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 Topology: Member 0 Address: gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 State: ONLINE Errors: [] 2025/08/13 19:24:19 Adding instance (gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge) to InnoDB cluster 2025/08/13 19:24:19 Running dba.getCluster('grdemandbackup').addInstance('operator:*****@gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge', {'recoveryMethod': 'clone', 'waitRecovery': 3}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead. NOTE: The target instance 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' has not been pre-provisioned (GTID set is empty). The Shell is unable to decide whether incremental state recovery can correctly provision it. Clone based recovery selected through the recoveryMethod option Validating instance configuration at gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306... This instance reports its own address as gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 Instance configuration is suitable. NOTE: Group Replication will communicate with other members using 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306'. Use the localAddress option to override. * Checking connectivity and SSL configuration... A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours. Adding instance to the cluster... Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background. Clone based state recovery is now in progress. NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back. * Waiting for clone to finish... NOTE: gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 is being cloned from gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 ** Stage DROP DATA: Completed ** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed NOTE: gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 is shutting down... * Waiting for server restart... \ * Waiting for server restart... | * Waiting for server restart... / * Waiting for server restart... - kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:24:30 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:24:30 +0000 UTC Warning Pod gr-demand-backup-mysql-1.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:24:34 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 150ms (150ms including waiting). Image size: 437177392 bytes. kubelet
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:25:07 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:25:07 +0000 UTC Normal StatefulSet.apps gr-demand-backup-mysql SuccessfulCreate create Claim datadir-gr-demand-backup-mysql-2 Pod gr-demand-backup-mysql-2 in StatefulSet gr-demand-backup-mysql success statefulset-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:25:08 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:25:08 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-alive-sponge/datadir-gr-demand-backup-mysql-2" pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31 logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:25:08 +0000 UTC Normal StatefulSet.apps gr-demand-backup-mysql SuccessfulCreate create Pod gr-demand-backup-mysql-2 in StatefulSet gr-demand-backup-mysql successful statefulset-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:25:12 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-32338f38-a0d0-4a41-be07-3c4414b4c037 pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31 logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:25:13 +0000 UTC Normal Pod gr-demand-backup-mysql-2 Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-mysql-2 to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-x2h6 default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:25:20 +0000 UTC Normal Pod gr-demand-backup-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-32338f38-a0d0-4a41-be07-3c4414b4c037" attachdetach-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:25:21 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:25:22 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 201ms (201ms including waiting). Image size: 108991485 bytes. 
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:25:22 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:25:22 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:25:23 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:25:38 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 14.461s (14.461s including waiting). Image size: 437177392 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:25:38 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:25:38 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:25:38 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:25:58 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 19.584s (19.584s including waiting). Image size: 428809836 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:25:58 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:25:58 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:26:19 +0000 UTC Warning Pod gr-demand-backup-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/08/13 19:26:01 Waiting for MySQL ready state 2025/08/13 19:26:04 MySQL is ready 2025/08/13 19:26:04 Starting bootstrap... 2025/08/13 19:26:05 mysql-shell version: 8.0.42 2025/08/13 19:26:05 Running dba.configureLocalInstance('operator:*****@gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge', {'clearReadOnly': true}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The clearReadOnly option is deprecated and will be removed in a future release. WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead. Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster... This instance reports its own address as gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 applierWorkerThreads will be set to the default value of 4. 
NOTE: Some configuration options need to be fixed: +----------------------------------------+---------------+----------------+----------------------------+ | Variable | Current Value | Required Value | Note |+----------------------------------------+---------------+----------------+----------------------------+ | binlog_transaction_dependency_tracking | COMMIT_ORDER | WRITESET | Update the server variable | +----------------------------------------+---------------+----------------+----------------------------+ Disabled super_read_only on the instance 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' Enabling super_read_only on the instance 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' Configuring instance... WARNING:*****@binlog_transaction_dependency_tracking' is deprecated and will be removed in a future release. (Code 1287). The instance 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' was configured to be used in an InnoDB cluster. 2025/08/13 19:26:06 Instance (gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge) configured to join to the InnoDB cluster 2025/08/13 19:26:06 peers: [gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge] 2025/08/13 19:26:06 Running dba.getCluster('grdemandbackup') Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. 2025/08/13 19:26:06 Connected to peer gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge 2025/08/13 19:26:07 Cluster status: ClusterName: grdemandbackup Status: OK_NO_TOLERANCE StatusText: Cluster is NOT tolerant to any failures. SSL: REQUIRED Primary: gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 Topology: Member 0 Address: gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 State: ONLINE Errors: [] Member 1 Address: gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 State: ONLINE Errors: [] 2025/08/13 19:26:07 Adding instance (gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge) to InnoDB cluster 2025/08/13 19:26:07 Running dba.getCluster('grdemandbackup').addInstance('operator:*****@gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge', {'recoveryMethod': 'clone', 'waitRecovery': 3}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead. NOTE: The target instance 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' has not been pre-provisioned (GTID set is empty). The Shell is unable to decide whether incremental state recovery can correctly provision it. Clone based recovery selected through the recoveryMethod option Validating instance configuration at gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306... This instance reports its own address as gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 Instance configuration is suitable. NOTE: Group Replication will communicate with other members using 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306'. 
Use the localAddress option to override. * Checking connectivity and SSL configuration... A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours. Adding instance to the cluster... Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background. Clone based state recovery is now in progress. NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back. * Waiting for clone to finish... NOTE: gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 is being cloned from gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 ** Stage DROP DATA: Completed ** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed NOTE: gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 is shutting down... * Waiting for server restart... \ * Waiting for server restart... | * Waiting for server restart... / * Waiting for server restart... - kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:26:19 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:26:19 +0000 UTC Warning Pod gr-demand-backup-mysql-2.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:26:20 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 232ms (232ms including waiting). Image size: 437177392 bytes. 
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:02 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-f5wws Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-router-777cbbbc58-f5wws to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-w8m4 default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:02 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-f5wws.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:02 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-g6bt8 Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-router-777cbbbc58-g6bt8 to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-x2h6 default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:02 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-w664n Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-router-777cbbbc58-w664n to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-9kgt default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:02 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-w664n.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:02 +0000 UTC Normal ReplicaSet.apps gr-demand-backup-router-777cbbbc58 SuccessfulCreate Created pod: gr-demand-backup-router-777cbbbc58-w664n replicaset-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:02 +0000 UTC Normal ReplicaSet.apps gr-demand-backup-router-777cbbbc58 SuccessfulCreate Created pod: gr-demand-backup-router-777cbbbc58-f5wws replicaset-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:02 +0000 UTC Normal ReplicaSet.apps gr-demand-backup-router-777cbbbc58 SuccessfulCreate Created pod: gr-demand-backup-router-777cbbbc58-g6bt8 replicaset-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:02 +0000 UTC Normal Deployment.apps gr-demand-backup-router ScalingReplicaSet Scaled up replica set gr-demand-backup-router-777cbbbc58 to 3 deployment-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:03 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-f5wws.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 222ms (222ms including waiting). Image size: 108991485 bytes. 
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:03 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-f5wws.spec.initContainers{router-init} Created Created container: router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:03 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-f5wws.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:03 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-g6bt8.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:03 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-g6bt8.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 160ms (160ms including waiting). Image size: 108991485 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:03 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-g6bt8.spec.initContainers{router-init} Created Created container: router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:03 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-g6bt8.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:03 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-w664n.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 228ms (228ms including waiting). Image size: 108991485 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:03 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-w664n.spec.initContainers{router-init} Created Created container: router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:03 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-w664n.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:04 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-f5wws.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:04 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-w664n.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:05 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-g6bt8.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:13 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-f5wws.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router8.0" in 9.313s (9.313s including waiting). Image size: 228799009 bytes. 
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:13 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-f5wws.spec.containers{router} Created Created container: router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:13 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-f5wws.spec.containers{router} Started Started container router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:14 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-w664n.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router8.0" in 9.216s (9.216s including waiting). Image size: 228799009 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:14 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-w664n.spec.containers{router} Created Created container: router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:14 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-w664n.spec.containers{router} Started Started container router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:15 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-g6bt8.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router8.0" in 10.044s (10.044s including waiting). Image size: 228799009 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:15 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-g6bt8.spec.containers{router} Created Created container: router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:15 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-g6bt8.spec.containers{router} Started Started container router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:28 +0000 UTC Warning PerconaServerMySQL.ps.percona.com gr-demand-backup ClusterStateChanged Initializing -> Ready ps-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:36 +0000 UTC Normal Pod xb-gr-demand-backup-minio-minio-6cjgz Binding Scheduled Successfully assigned kuttl-test-alive-sponge/xb-gr-demand-backup-minio-minio-6cjgz to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-9kgt default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:36 +0000 UTC Normal Pod xb-gr-demand-backup-minio-minio-6cjgz.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:36 +0000 UTC Normal Pod xb-gr-demand-backup-minio-minio-6cjgz.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 158ms (158ms including waiting). Image size: 108991485 bytes. 
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:36 +0000 UTC Normal Pod xb-gr-demand-backup-minio-minio-6cjgz.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:36 +0000 UTC Normal Pod xb-gr-demand-backup-minio-minio-6cjgz.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:36 +0000 UTC Normal Job.batch xb-gr-demand-backup-minio-minio SuccessfulCreate Created pod: xb-gr-demand-backup-minio-minio-6cjgz job-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:38 +0000 UTC Normal Pod xb-gr-demand-backup-minio-minio-6cjgz.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:39 +0000 UTC Normal Pod xb-gr-demand-backup-minio-minio-6cjgz.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 155ms (155ms including waiting). Image size: 428809836 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:39 +0000 UTC Normal Pod xb-gr-demand-backup-minio-minio-6cjgz.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:39 +0000 UTC Normal Pod xb-gr-demand-backup-minio-minio-6cjgz.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:27:46 +0000 UTC Normal Job.batch xb-gr-demand-backup-minio-minio Completed Job completed job-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:02 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:02 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:02 +0000 UTC Normal StatefulSet.apps gr-demand-backup-mysql SuccessfulDelete delete Pod gr-demand-backup-mysql-2 in StatefulSet gr-demand-backup-mysql successful statefulset-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:02 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-f5wws.spec.containers{router} Killing Stopping container router kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:02 +0000 UTC Warning Pod gr-demand-backup-router-777cbbbc58-f5wws.spec.containers{router} Unhealthy Readiness probe errored: rpc error: code = Unknown desc = failed to exec in container: container is in CONTAINER_EXITED state kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:02 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-g6bt8.spec.containers{router} Killing Stopping container router kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:02 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-w664n.spec.containers{router} Killing Stopping container router kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:02 +0000 UTC Warning Pod gr-demand-backup-router-777cbbbc58-w664n.spec.containers{router} Unhealthy Readiness probe failed: {"isAlive":true} kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:02 +0000 UTC Normal ReplicaSet.apps gr-demand-backup-router-777cbbbc58 SuccessfulDelete Deleted pod: gr-demand-backup-router-777cbbbc58-f5wws replicaset-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:02 +0000 UTC Normal ReplicaSet.apps gr-demand-backup-router-777cbbbc58 SuccessfulDelete Deleted pod: gr-demand-backup-router-777cbbbc58-g6bt8 replicaset-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:02 +0000 UTC Normal ReplicaSet.apps gr-demand-backup-router-777cbbbc58 SuccessfulDelete Deleted pod: gr-demand-backup-router-777cbbbc58-w664n replicaset-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:02 +0000 UTC Normal Deployment.apps gr-demand-backup-router ScalingReplicaSet Scaled down replica set gr-demand-backup-router-777cbbbc58 to 0 from 3 deployment-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:03 +0000 UTC Warning PerconaServerMySQL.ps.percona.com gr-demand-backup ClusterStateChanged Ready -> Stopping ps-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:06 +0000 UTC Warning Pod gr-demand-backup-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/08/13 19:28:06 MySQL state is not ready... kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:10 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:10 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:10 +0000 UTC Normal StatefulSet.apps gr-demand-backup-mysql SuccessfulDelete delete Pod gr-demand-backup-mysql-1 in StatefulSet gr-demand-backup-mysql successful statefulset-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:17 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:17 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:17 +0000 UTC Normal StatefulSet.apps gr-demand-backup-mysql SuccessfulDelete delete Pod gr-demand-backup-mysql-0 in StatefulSet gr-demand-backup-mysql successful statefulset-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:20 +0000 UTC Warning Pod gr-demand-backup-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/08/13 19:28:20 MySQL state is not ready... kubelet
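Note: the Ready -> Stopping transition above is the expected prelude to a restore: the operator deletes the MySQL StatefulSet pods and scales the router Deployment to zero so the restore job can prepare the data directory with mysqld stopped, which is why the "MySQL state is not ready..." readiness failures during shutdown are benign. The state machine can be followed directly on the custom resource:

    # Watch the cluster state (Ready -> Stopping -> Paused -> Initializing -> Ready)
    kubectl -n kuttl-test-alive-sponge get perconaservermysql gr-demand-backup \
      -o jsonpath='{.status.state}' -w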
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:21 +0000 UTC Warning PerconaServerMySQL.ps.percona.com gr-demand-backup ClusterStateChanged Stopping -> Paused ps-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:26 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-minio-zjwcb Binding Scheduled Successfully assigned kuttl-test-alive-sponge/xb-restore-gr-demand-backup-restore-minio-zjwcb to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-w8m4 default-scheduler
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:26 +0000 UTC Warning Pod xb-restore-gr-demand-backup-restore-minio-zjwcb FailedAttachVolume Multi-Attach error for volume "pvc-6de198cc-51dd-4462-a5ca-d2c03aa7287f" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:26 +0000 UTC Normal Job.batch xb-restore-gr-demand-backup-restore-minio SuccessfulCreate Created pod: xb-restore-gr-demand-backup-restore-minio-zjwcb job-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:49 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-minio-zjwcb SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-6de198cc-51dd-4462-a5ca-d2c03aa7287f" attachdetach-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:50 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-minio-zjwcb.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:50 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-minio-zjwcb.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 215ms (215ms including waiting). Image size: 108991485 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:50 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-minio-zjwcb.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:50 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-minio-zjwcb.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:52 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-minio-zjwcb.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:52 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-minio-zjwcb.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 152ms (152ms including waiting). Image size: 428809836 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:52 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-minio-zjwcb.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:28:52 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-minio-zjwcb.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:29:03 +0000 UTC Normal Job.batch xb-restore-gr-demand-backup-restore-minio Completed Job completed job-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:29:04 +0000 UTC Normal Pod gr-demand-backup-mysql-0 Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-mysql-0 to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-9kgt default-scheduler
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:29:04 +0000 UTC Warning Pod gr-demand-backup-mysql-0 FailedAttachVolume Multi-Attach error for volume "pvc-6de198cc-51dd-4462-a5ca-d2c03aa7287f" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:29:04 +0000 UTC Warning PerconaServerMySQL.ps.percona.com gr-demand-backup ClusterStateChanged Paused -> Initializing ps-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:29:28 +0000 UTC Normal Pod gr-demand-backup-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-6de198cc-51dd-4462-a5ca-d2c03aa7287f" attachdetach-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:29:29 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:29:29 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 222ms (222ms including waiting). Image size: 108991485 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:29:29 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:29:29 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:29:31 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:29:31 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 150ms (150ms including waiting). Image size: 437177392 bytes. kubelet
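Note: the Multi-Attach warnings here are transient rather than errors: pvc-6de198cc-... is a ReadWriteOnce PD volume, so the restore pod (and later gr-demand-backup-mysql-0), scheduled on a different node, must wait for the previous node to detach the disk; that is why AttachVolume.Attach only succeeds some twenty seconds later. Current attachments and attach failures can be inspected with:

    kubectl get volumeattachments
    kubectl -n kuttl-test-alive-sponge get events --field-selector reason=FailedAttachVolume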
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:29:31 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:29:31 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:29:31 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:29:31 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 168ms (168ms including waiting). Image size: 428809836 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:29:31 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:29:31 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:04 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:04 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:04 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-alive-sponge/datadir-gr-demand-backup-mysql-1" pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:08 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-f55650a1-cc93-4873-a4c8-43692c03cd29 pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:09 +0000 UTC Normal Pod gr-demand-backup-mysql-1 Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-mysql-1 to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-w8m4 default-scheduler
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:17 +0000 UTC Normal Pod gr-demand-backup-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-f55650a1-cc93-4873-a4c8-43692c03cd29" attachdetach-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:18 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:18 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 234ms (234ms including waiting). Image size: 108991485 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:18 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:18 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:19 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:20 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 207ms (207ms including waiting). Image size: 437177392 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:20 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:20 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:20 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:20 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 153ms (153ms including waiting). Image size: 428809836 bytes. kubelet
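Note: the WaitForFirstConsumer -> ExternalProvisioning -> Provisioning -> ProvisioningSucceeded sequence for datadir-gr-demand-backup-mysql-1 is the normal lifecycle of a StorageClass with volumeBindingMode: WaitForFirstConsumer: the PD is only created after the scheduler has placed the pod, so the disk lands in the same zone as the node. The binding mode is visible with:

    kubectl get storageclass \
      -o custom-columns=NAME:.metadata.name,PROVISIONER:.provisioner,BINDING:.volumeBindingMode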
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:20 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:20 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:49 +0000 UTC Warning Pod gr-demand-backup-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/08/13 19:30:37 Waiting for MySQL ready state
2025/08/13 19:30:37 MySQL is ready
2025/08/13 19:30:37 Starting bootstrap...
2025/08/13 19:30:38 mysql-shell version: 8.0.42
2025/08/13 19:30:38 Running dba.configureLocalInstance('operator:*****@gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge', {'clearReadOnly': true})
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
WARNING: The clearReadOnly option is deprecated and will be removed in a future release.
WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead.
Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster...
This instance reports its own address as gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306
applierWorkerThreads will be set to the default value of 4.
NOTE: Some configuration options need to be fixed:
+----------------------------------------+---------------+----------------+----------------------------+
| Variable                               | Current Value | Required Value | Note                       |
+----------------------------------------+---------------+----------------+----------------------------+
| binlog_transaction_dependency_tracking | COMMIT_ORDER  | WRITESET       | Update the server variable |
+----------------------------------------+---------------+----------------+----------------------------+
Disabled super_read_only on the instance 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306'
Enabling super_read_only on the instance 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306'
Configuring instance...
WARNING:*****@binlog_transaction_dependency_tracking' is deprecated and will be removed in a future release. (Code 1287).
The instance 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' was configured to be used in an InnoDB cluster.
2025/08/13 19:30:38 Instance (gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge) configured to join to the InnoDB cluster
2025/08/13 19:30:38 peers: [gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge]
2025/08/13 19:30:38 Running dba.getCluster('grdemandbackup')
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
2025/08/13 19:30:38 Connected to peer gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge
2025/08/13 19:30:39 Cluster status: ClusterName: grdemandbackup
Status: OK_NO_TOLERANCE
StatusText: Cluster is NOT tolerant to any failures.
SSL: REQUIRED
Primary: gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306
Topology: Member 0
Address: gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306
State: ONLINE
Errors: []
2025/08/13 19:30:39 Adding instance (gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge) to InnoDB cluster
2025/08/13 19:30:39 Running dba.getCluster('grdemandbackup').addInstance('operator:*****@gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge', {'recoveryMethod': 'clone', 'waitRecovery': 3})
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead.
NOTE: A GTID set check of the MySQL instance at 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' determined that it is missing transactions that were purged from all cluster members.
NOTE: The target instance 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' has not been pre-provisioned (GTID set is empty). The Shell is unable to determine whether the instance has pre-existing data that would be overwritten with clone based recovery.
Clone based recovery selected through the recoveryMethod option
Validating instance configuration at gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306...
This instance reports its own address as gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306
Instance configuration is suitable.
NOTE: Group Replication will communicate with other members using 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306'. Use the localAddress option to override.
* Checking connectivity and SSL configuration...
A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours.
Adding instance to the cluster...
Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background.
Clone based state recovery is now in progress.
NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back.
* Waiting for clone to finish...
NOTE: gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 is being cloned from gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306
** Stage DROP DATA: Completed
** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed
NOTE: gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 is shutting down...
* Waiting for server restart... \ * Waiting for server restart... | * Waiting for server restart... / * Waiting for server restart... - kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:49 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:49 +0000 UTC Warning Pod gr-demand-backup-mysql-1.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:30:49 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 187ms (187ms including waiting). Image size: 437177392 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:31:23 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:31:24 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:31:24 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-alive-sponge/datadir-gr-demand-backup-mysql-2" pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:31:27 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-83e54b1d-6556-41da-9698-ebcc2b9cbc16 pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:31:28 +0000 UTC Normal Pod gr-demand-backup-mysql-2 Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-mysql-2 to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-x2h6 default-scheduler
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:31:35 +0000 UTC Normal Pod gr-demand-backup-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-83e54b1d-6556-41da-9698-ebcc2b9cbc16" attachdetach-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:31:37 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:31:37 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 251ms (251ms including waiting). Image size: 108991485 bytes. kubelet
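Note: the startup probe failure and FailedPreStopHook on gr-demand-backup-mysql-1 above are expected during clone-based provisioning: addInstance(..., {'recoveryMethod': 'clone'}) copies the donor's data directory and then restarts mysqld, so the probe catches the server mid-restart and the kubelet restarts the container, after which bootstrap resumes. Clone progress can be watched on the joining member, e.g. (a sketch; the secret name and key are assumptions):

    ROOT_PASSWORD=$(kubectl -n kuttl-test-alive-sponge get secret test-secrets \
      -o jsonpath='{.data.root}' | base64 -d)
    kubectl -n kuttl-test-alive-sponge exec gr-demand-backup-mysql-1 -c mysql -- \
      mysql -uroot -p"$ROOT_PASSWORD" -e "SELECT STAGE, STATE FROM performance_schema.clone_progress"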
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:31:37 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:31:37 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:31:39 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:31:39 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 223ms (223ms including waiting). Image size: 437177392 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:31:39 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:31:39 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:31:39 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:31:39 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 158ms (158ms including waiting). Image size: 428809836 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:31:40 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:31:40 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:08 +0000 UTC Warning Pod gr-demand-backup-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/08/13 19:31:57 Waiting for MySQL ready state
2025/08/13 19:31:57 MySQL is ready
2025/08/13 19:31:57 Starting bootstrap...
2025/08/13 19:31:57 mysql-shell version: 8.0.42
2025/08/13 19:31:57 Running dba.configureLocalInstance('operator:*****@gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge', {'clearReadOnly': true})
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
WARNING: The clearReadOnly option is deprecated and will be removed in a future release.
WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead.
Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster...
This instance reports its own address as gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306
applierWorkerThreads will be set to the default value of 4.
NOTE: Some configuration options need to be fixed:
+----------------------------------------+---------------+----------------+----------------------------+
| Variable                               | Current Value | Required Value | Note                       |
+----------------------------------------+---------------+----------------+----------------------------+
| binlog_transaction_dependency_tracking | COMMIT_ORDER  | WRITESET       | Update the server variable |
+----------------------------------------+---------------+----------------+----------------------------+
Disabled super_read_only on the instance 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306'
Enabling super_read_only on the instance 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306'
Configuring instance...
WARNING:*****@binlog_transaction_dependency_tracking' is deprecated and will be removed in a future release. (Code 1287).
The instance 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' was configured to be used in an InnoDB cluster.
2025/08/13 19:31:57 Instance (gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge) configured to join to the InnoDB cluster
2025/08/13 19:31:57 peers: [gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge]
2025/08/13 19:31:57 Running dba.getCluster('grdemandbackup')
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
2025/08/13 19:31:58 Connected to peer gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge
2025/08/13 19:31:58 Cluster status: ClusterName: grdemandbackup
Status: OK_NO_TOLERANCE
StatusText: Cluster is NOT tolerant to any failures.
SSL: REQUIRED
Primary: gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306
Topology: Member 0
Address: gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306
State: ONLINE
Errors: []
Member 1
Address: gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306
State: ONLINE
Errors: []
2025/08/13 19:31:58 Adding instance (gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge) to InnoDB cluster
2025/08/13 19:31:58 Running dba.getCluster('grdemandbackup').addInstance('operator:*****@gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge', {'recoveryMethod': 'clone', 'waitRecovery': 3})
Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory
WARNING: Using a password on the command line interface can be insecure.
WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead.
NOTE: A GTID set check of the MySQL instance at 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' determined that it is missing transactions that were purged from all cluster members.
NOTE: The target instance 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' has not been pre-provisioned (GTID set is empty). The Shell is unable to determine whether the instance has pre-existing data that would be overwritten with clone based recovery.
Clone based recovery selected through the recoveryMethod option
Validating instance configuration at gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306...
This instance reports its own address as gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306
Instance configuration is suitable.
NOTE: Group Replication will communicate with other members using 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306'. Use the localAddress option to override.
* Checking connectivity and SSL configuration...
A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours.
Adding instance to the cluster...
Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background.
Clone based state recovery is now in progress.
NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back.
* Waiting for clone to finish...
NOTE: gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 is being cloned from gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306
** Stage DROP DATA: Completed
** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed
NOTE: gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 is shutting down...
* Waiting for server restart... \ * Waiting for server restart... | * Waiting for server restart... / * Waiting for server restart... - kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:08 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:08 +0000 UTC Warning Pod gr-demand-backup-mysql-2.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:09 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 221ms (221ms including waiting). Image size: 437177392 bytes. kubelet
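Note: Status: OK_NO_TOLERANCE in both bootstrap logs is expected while members join one at a time; a three-member group only tolerates a failure once all members are ONLINE. The same status the bootstrap prints can be queried through MySQL Shell (a sketch; the secret name and key are assumptions):

    OPERATOR_PASSWORD=$(kubectl -n kuttl-test-alive-sponge get secret test-secrets \
      -o jsonpath='{.data.operator}' | base64 -d)
    kubectl -n kuttl-test-alive-sponge exec gr-demand-backup-mysql-0 -c mysql -- \
      mysqlsh -uoperator -p"$OPERATOR_PASSWORD" -h127.0.0.1 --js \
      -e "print(dba.getCluster('grdemandbackup').status())"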
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:54 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-55jzq Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-router-777cbbbc58-55jzq to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-w8m4 default-scheduler
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:54 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-bvvh7 Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-router-777cbbbc58-bvvh7 to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-x2h6 default-scheduler
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:54 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-kvwgh Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-router-777cbbbc58-kvwgh to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-9kgt default-scheduler
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:54 +0000 UTC Normal ReplicaSet.apps gr-demand-backup-router-777cbbbc58 SuccessfulCreate Created pod: gr-demand-backup-router-777cbbbc58-kvwgh replicaset-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:54 +0000 UTC Normal ReplicaSet.apps gr-demand-backup-router-777cbbbc58 SuccessfulCreate Created pod: gr-demand-backup-router-777cbbbc58-55jzq replicaset-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:54 +0000 UTC Normal ReplicaSet.apps gr-demand-backup-router-777cbbbc58 SuccessfulCreate Created pod: gr-demand-backup-router-777cbbbc58-bvvh7 replicaset-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:54 +0000 UTC Normal Deployment.apps gr-demand-backup-router ScalingReplicaSet Scaled up replica set gr-demand-backup-router-777cbbbc58 to 3 from 0 deployment-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:55 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-55jzq.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:55 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-bvvh7.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:55 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-bvvh7.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 204ms (204ms including waiting). Image size: 108991485 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:55 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-bvvh7.spec.initContainers{router-init} Created Created container: router-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:55 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-bvvh7.spec.initContainers{router-init} Started Started container router-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:55 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-kvwgh.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:55 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-kvwgh.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 182ms (182ms including waiting). Image size: 108991485 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:55 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-kvwgh.spec.initContainers{router-init} Created Created container: router-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:55 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-kvwgh.spec.initContainers{router-init} Started Started container router-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:56 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-55jzq.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 212ms (212ms including waiting). Image size: 108991485 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:56 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-55jzq.spec.initContainers{router-init} Created Created container: router-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:56 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-55jzq.spec.initContainers{router-init} Started Started container router-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:57 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-bvvh7.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router8.0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:57 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-bvvh7.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router8.0" in 185ms (185ms including waiting). Image size: 228799009 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:57 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-bvvh7.spec.containers{router} Created Created container: router kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:57 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-bvvh7.spec.containers{router} Started Started container router kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:57 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-kvwgh.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router8.0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:57 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-kvwgh.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router8.0" in 193ms (193ms including waiting). Image size: 228799009 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:57 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-kvwgh.spec.containers{router} Created Created container: router kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:57 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-kvwgh.spec.containers{router} Started Started container router kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:58 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-55jzq.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router8.0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:58 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-55jzq.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router8.0" in 149ms (149ms including waiting). Image size: 228799009 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:58 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-55jzq.spec.containers{router} Created Created container: router kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:32:58 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-55jzq.spec.containers{router} Started Started container router kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:21 +0000 UTC Normal Job.batch xb-gr-demand-backup-s3-aws-s3 SuccessfulCreate Created pod: xb-gr-demand-backup-s3-aws-s3-xrjmx job-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:22 +0000 UTC Normal Pod xb-gr-demand-backup-s3-aws-s3-xrjmx Binding Scheduled Successfully assigned kuttl-test-alive-sponge/xb-gr-demand-backup-s3-aws-s3-xrjmx to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-9kgt default-scheduler
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:22 +0000 UTC Normal Pod xb-gr-demand-backup-s3-aws-s3-xrjmx.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:22 +0000 UTC Normal Pod xb-gr-demand-backup-s3-aws-s3-xrjmx.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 248ms (248ms including waiting). Image size: 108991485 bytes. kubelet
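Note: the xb-gr-demand-backup-s3-aws-s3 job repeats the backup flow against a real S3 bucket instead of the in-cluster minio; from the operator's side only the referenced storage differs. Backups taken so far and their states can be listed with:

    kubectl -n kuttl-test-alive-sponge get perconaservermysqlbackup -o wide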
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:22 +0000 UTC Normal Pod xb-gr-demand-backup-s3-aws-s3-xrjmx.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:22 +0000 UTC Normal Pod xb-gr-demand-backup-s3-aws-s3-xrjmx.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:24 +0000 UTC Normal Pod xb-gr-demand-backup-s3-aws-s3-xrjmx.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:24 +0000 UTC Normal Pod xb-gr-demand-backup-s3-aws-s3-xrjmx.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 184ms (184ms including waiting). Image size: 428809836 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:24 +0000 UTC Normal Pod xb-gr-demand-backup-s3-aws-s3-xrjmx.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:24 +0000 UTC Normal Pod xb-gr-demand-backup-s3-aws-s3-xrjmx.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:30 +0000 UTC Normal Job.batch xb-gr-demand-backup-s3-aws-s3 Completed Job completed job-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:51 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:51 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:51 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-55jzq.spec.containers{router} Killing Stopping container router kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:51 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-bvvh7.spec.containers{router} Killing Stopping container router kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:51 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-kvwgh.spec.containers{router} Killing Stopping container router kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:51 +0000 UTC Normal ReplicaSet.apps gr-demand-backup-router-777cbbbc58 SuccessfulDelete Deleted pod: gr-demand-backup-router-777cbbbc58-55jzq replicaset-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:51 +0000 UTC Normal ReplicaSet.apps gr-demand-backup-router-777cbbbc58 SuccessfulDelete Deleted pod: gr-demand-backup-router-777cbbbc58-kvwgh replicaset-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:51 +0000 UTC Normal ReplicaSet.apps gr-demand-backup-router-777cbbbc58 SuccessfulDelete Deleted pod: gr-demand-backup-router-777cbbbc58-bvvh7 replicaset-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:57 +0000 UTC Warning Pod gr-demand-backup-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/08/13 19:33:57 MySQL state is not ready... kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:58 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:33:58 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:34:02 +0000 UTC Warning Pod gr-demand-backup-mysql-1.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/08/13 19:34:02 MySQL state is not ready... kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:34:06 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:34:06 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:34:09 +0000 UTC Warning Pod gr-demand-backup-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/08/13 19:34:09 MySQL state is not ready... kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:34:17 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-s3-r5krg Binding Scheduled Successfully assigned kuttl-test-alive-sponge/xb-restore-gr-demand-backup-restore-s3-r5krg to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-w8m4 default-scheduler
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:34:17 +0000 UTC Warning Pod xb-restore-gr-demand-backup-restore-s3-r5krg FailedAttachVolume Multi-Attach error for volume "pvc-6de198cc-51dd-4462-a5ca-d2c03aa7287f" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:34:17 +0000 UTC Normal Job.batch xb-restore-gr-demand-backup-restore-s3 SuccessfulCreate Created pod: xb-restore-gr-demand-backup-restore-s3-r5krg job-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:34:36 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-s3-r5krg SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-6de198cc-51dd-4462-a5ca-d2c03aa7287f" attachdetach-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:34:37 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-s3-r5krg.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:34:37 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-s3-r5krg.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 179ms (179ms including waiting). Image size: 108991485 bytes. kubelet
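Note: the xb-restore-gr-demand-backup-restore-s3 job mirrors the minio restore above, this time sourcing the S3 backup. A sketch of the restore request that drives it (field names are assumptions, matching the backup sketch earlier, not taken from this log):

    kubectl -n kuttl-test-alive-sponge apply -f - <<EOF
    apiVersion: ps.percona.com/v1alpha1
    kind: PerconaServerMySQLRestore
    metadata:
      name: gr-demand-backup-restore-s3
    spec:
      clusterName: gr-demand-backup
      backupName: gr-demand-backup-s3
    EOF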
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:34:37 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-s3-r5krg.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:34:37 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-s3-r5krg.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:34:39 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-s3-r5krg.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:34:39 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-s3-r5krg.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 217ms (217ms including waiting). Image size: 428809836 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:34:39 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-s3-r5krg.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:34:39 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-s3-r5krg.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:34:54 +0000 UTC Normal Pod gr-demand-backup-mysql-0 Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-mysql-0 to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-9kgt default-scheduler
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:34:54 +0000 UTC Warning Pod gr-demand-backup-mysql-0 FailedAttachVolume Multi-Attach error for volume "pvc-6de198cc-51dd-4462-a5ca-d2c03aa7287f" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:34:54 +0000 UTC Normal Job.batch xb-restore-gr-demand-backup-restore-s3 Completed Job completed job-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:35:15 +0000 UTC Normal Pod gr-demand-backup-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-6de198cc-51dd-4462-a5ca-d2c03aa7287f" attachdetach-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:35:16 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:35:16 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 216ms (217ms including waiting). Image size: 108991485 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:35:16 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:35:16 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:35:18 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:35:18 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 166ms (167ms including waiting). Image size: 437177392 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:35:18 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:35:18 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:35:18 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:35:19 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 227ms (227ms including waiting). Image size: 428809836 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:35:19 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:35:19 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:35:51 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:35:51 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:35:51 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-alive-sponge/datadir-gr-demand-backup-mysql-1" pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:35:55 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-583aa80d-27da-4017-8a4a-6dfe6eb11a1f pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:35:55 +0000 UTC Normal Pod gr-demand-backup-mysql-1 Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-mysql-1 to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-w8m4 default-scheduler
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:36:03 +0000 UTC Normal Pod gr-demand-backup-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-583aa80d-27da-4017-8a4a-6dfe6eb11a1f" attachdetach-controller
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:36:04 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:36:04 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 255ms (255ms including waiting). Image size: 108991485 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:36:04 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:36:04 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:36:06 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:36:06 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 139ms (139ms including waiting). Image size: 437177392 bytes. kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:36:06 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:36:06 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:36:06 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:36:06 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 174ms (174ms including waiting). Image size: 428809836 bytes. kubelet
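Note: across both restores only gr-demand-backup-mysql-0 reattaches the original volume (pvc-6de198cc-...); the replica claims are provisioned fresh each time (pvc-f55650a1-... after the minio restore, pvc-583aa80d-... here), so the replicas repopulate via the clone recovery seen above rather than from old disks. The claim-to-volume mapping is visible with:

    kubectl -n kuttl-test-alive-sponge get pvc \
      -o custom-columns=NAME:.metadata.name,VOLUME:.spec.volumeName,STATUS:.status.phase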
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:36:06 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:36:06 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:36:36 +0000 UTC Warning Pod gr-demand-backup-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/08/13 19:36:24 Waiting for MySQL ready state 2025/08/13 19:36:24 MySQL is ready 2025/08/13 19:36:24 Starting bootstrap... 2025/08/13 19:36:24 mysql-shell version: 8.0.42 2025/08/13 19:36:24 Running dba.configureLocalInstance('operator:*****@gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge', {'clearReadOnly': true}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The clearReadOnly option is deprecated and will be removed in a future release. WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead. Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster... This instance reports its own address as gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 applierWorkerThreads will be set to the default value of 4. NOTE: Some configuration options need to be fixed: +----------------------------------------+---------------+----------------+----------------------------+ | Variable | Current Value | Required Value | Note |+----------------------------------------+---------------+----------------+----------------------------+ | binlog_transaction_dependency_tracking | COMMIT_ORDER | WRITESET | Update the server variable | +----------------------------------------+---------------+----------------+----------------------------+ Disabled super_read_only on the instance 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' Enabling super_read_only on the instance 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' Configuring instance... WARNING:*****@binlog_transaction_dependency_tracking' is deprecated and will be removed in a future release. (Code 1287). The instance 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' was configured to be used in an InnoDB cluster. 2025/08/13 19:36:24 Instance (gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge) configured to join to the InnoDB cluster 2025/08/13 19:36:24 peers: [gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge] 2025/08/13 19:36:24 Running dba.getCluster('grdemandbackup') Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. 2025/08/13 19:36:25 Connected to peer gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge 2025/08/13 19:36:25 Cluster status: ClusterName: grdemandbackup Status: OK_NO_TOLERANCE StatusText: Cluster is NOT tolerant to any failures. 
SSL: REQUIRED Primary: gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 Topology: Member 0 Address: gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 State: ONLINE Errors: [] 2025/08/13 19:36:25 Adding instance (gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge) to InnoDB cluster 2025/08/13 19:36:25 Running dba.getCluster('grdemandbackup').addInstance('operator:*****@gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge', {'recoveryMethod': 'clone', 'waitRecovery': 3}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead. NOTE: A GTID set check of the MySQL instance at 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' determined that it is missing transactions that were purged from all cluster members. NOTE: The target instance 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' has not been pre-provisioned (GTID set is empty). The Shell is unable to determine whether the instance has pre-existing data that would be overwritten with clone based recovery. Clone based recovery selected through the recoveryMethod option Validating instance configuration at gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306... This instance reports its own address as gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 Instance configuration is suitable. NOTE: Group Replication will communicate with other members using 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306'. Use the localAddress option to override. * Checking connectivity and SSL configuration... A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours. Adding instance to the cluster... Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background. Clone based state recovery is now in progress. NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back. * Waiting for clone to finish... NOTE: gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 is being cloned from gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 ** Stage DROP DATA: Completed ** Clone Transfer FILE COPY 0% In Progress PAGE COPY 0% Not Started REDO COPY 0% Not Started** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed NOTE: gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 is shutting down... * Waiting for server restart... \ * Waiting for server restart... | * Waiting for server restart... / * Waiting for server restart... 
- kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:36:36 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:36:36 +0000 UTC Warning Pod gr-demand-backup-mysql-1.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:36:36 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 173ms (173ms including waiting). Image size: 437177392 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:09 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:09 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:09 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-alive-sponge/datadir-gr-demand-backup-mysql-2" pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31 logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:13 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-55c8dd75-cf8a-4f7c-92b3-85467f2dbbb4 pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31 logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:13 +0000 UTC Normal Pod gr-demand-backup-mysql-2 Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-mysql-2 to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-x2h6 default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:21 +0000 UTC Normal Pod gr-demand-backup-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-55c8dd75-cf8a-4f7c-92b3-85467f2dbbb4" attachdetach-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:24 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:25 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 174ms (174ms including waiting). Image size: 108991485 bytes. 
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:25 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:25 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:27 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:27 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 190ms (190ms including waiting). Image size: 437177392 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:27 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:27 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:27 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:27 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 213ms (213ms including waiting). Image size: 428809836 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:27 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:27 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:56 +0000 UTC Warning Pod gr-demand-backup-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/08/13 19:37:44 Waiting for MySQL ready state 2025/08/13 19:37:44 MySQL is ready 2025/08/13 19:37:44 Starting bootstrap... 2025/08/13 19:37:45 mysql-shell version: 8.0.42 2025/08/13 19:37:45 Running dba.configureLocalInstance('operator:*****@gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge', {'clearReadOnly': true}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The clearReadOnly option is deprecated and will be removed in a future release. WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead. Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster... This instance reports its own address as gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 applierWorkerThreads will be set to the default value of 4. 
NOTE: Some configuration options need to be fixed: +----------------------------------------+---------------+----------------+----------------------------+ | Variable | Current Value | Required Value | Note |+----------------------------------------+---------------+----------------+----------------------------+ | binlog_transaction_dependency_tracking | COMMIT_ORDER | WRITESET | Update the server variable | +----------------------------------------+---------------+----------------+----------------------------+ Disabled super_read_only on the instance 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' Enabling super_read_only on the instance 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' Configuring instance... WARNING:*****@binlog_transaction_dependency_tracking' is deprecated and will be removed in a future release. (Code 1287). The instance 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' was configured to be used in an InnoDB cluster. 2025/08/13 19:37:45 Instance (gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge) configured to join to the InnoDB cluster 2025/08/13 19:37:45 peers: [gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge] 2025/08/13 19:37:45 Running dba.getCluster('grdemandbackup') Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. 2025/08/13 19:37:45 Connected to peer gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge 2025/08/13 19:37:45 Cluster status: ClusterName: grdemandbackup Status: OK_NO_TOLERANCE StatusText: Cluster is NOT tolerant to any failures. SSL: REQUIRED Primary: gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 Topology: Member 0 Address: gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 State: ONLINE Errors: [] Member 1 Address: gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 State: ONLINE Errors: [] 2025/08/13 19:37:45 Adding instance (gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge) to InnoDB cluster 2025/08/13 19:37:45 Running dba.getCluster('grdemandbackup').addInstance('operator:*****@gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge', {'recoveryMethod': 'clone', 'waitRecovery': 3}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead. NOTE: A GTID set check of the MySQL instance at 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' determined that it is missing transactions that were purged from all cluster members. NOTE: The target instance 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' has not been pre-provisioned (GTID set is empty). The Shell is unable to determine whether the instance has pre-existing data that would be overwritten with clone based recovery. Clone based recovery selected through the recoveryMethod option Validating instance configuration at gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306... 
This instance reports its own address as gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 Instance configuration is suitable. NOTE: Group Replication will communicate with other members using 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306'. Use the localAddress option to override. * Checking connectivity and SSL configuration... A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours. Adding instance to the cluster... Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background. Clone based state recovery is now in progress. NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back. * Waiting for clone to finish... NOTE: gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 is being cloned from gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 ** Stage DROP DATA: Completed ** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed NOTE: gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 is shutting down... * Waiting for server restart... \ * Waiting for server restart... | * Waiting for server restart... / * Waiting for server restart... - kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:56 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:56 +0000 UTC Warning Pod gr-demand-backup-mysql-2.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:37:56 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 147ms (147ms including waiting). Image size: 437177392 bytes. 
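[editor] The startup-probe "failures" above are the expected join path, not a crash: the bootstrap process in the mysql container drives MySQL Shell's AdminAPI to configure each joining instance and add it to the grdemandbackup cluster with clone recovery, and the kubelet restarts the container while the clone triggers the documented server restart. A minimal sketch of the equivalent shell invocations, assuming mysqlsh is available, the pods are reachable, and you substitute the operator user's real password (masked as ***** in the log; <PASSWORD> below is a placeholder):

# Prepare the local instance for InnoDB Cluster use (the log shows the
# deprecated configureLocalInstance; dba.configureInstance() is the
# current form).
mysqlsh --js -e "dba.configureLocalInstance('operator:<PASSWORD>@gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge', {clearReadOnly: true})"

# Join via a session on an existing member; recoveryMethod 'clone' wipes
# the joiner and copies data from the donor, restarting mysqld at the end.
# waitRecovery is omitted here since the log notes it is deprecated in
# favor of recoveryProgress.
mysqlsh --uri "operator:<PASSWORD>@gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge" --js -e "dba.getCluster('grdemandbackup').addInstance('operator:<PASSWORD>@gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge', {recoveryMethod: 'clone'})"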
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:35 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-mg69q Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-router-777cbbbc58-mg69q to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-9kgt default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:35 +0000 UTC Normal ReplicaSet.apps gr-demand-backup-router-777cbbbc58 SuccessfulCreate Created pod: gr-demand-backup-router-777cbbbc58-mg69q replicaset-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:36 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-6h4s2 Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-router-777cbbbc58-6h4s2 to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-x2h6 default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:36 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-6h4s2.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:36 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-mg69q.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:36 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-mg69q.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 231ms (231ms including waiting). Image size: 108991485 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:36 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-mg69q.spec.initContainers{router-init} Created Created container: router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:36 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-q4p9p Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-router-777cbbbc58-q4p9p to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-w8m4 default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:36 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-q4p9p.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:36 +0000 UTC Normal ReplicaSet.apps gr-demand-backup-router-777cbbbc58 SuccessfulCreate Created pod: gr-demand-backup-router-777cbbbc58-q4p9p replicaset-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:36 +0000 UTC Normal ReplicaSet.apps gr-demand-backup-router-777cbbbc58 SuccessfulCreate Created pod: gr-demand-backup-router-777cbbbc58-6h4s2 replicaset-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:37 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-6h4s2.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 210ms (210ms including waiting). Image size: 108991485 bytes. 
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:37 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-6h4s2.spec.initContainers{router-init} Created Created container: router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:37 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-6h4s2.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:37 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-mg69q.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:37 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-q4p9p.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 182ms (182ms including waiting). Image size: 108991485 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:37 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-q4p9p.spec.initContainers{router-init} Created Created container: router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:37 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-q4p9p.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:38 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-6h4s2.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:38 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-6h4s2.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router8.0" in 157ms (157ms including waiting). Image size: 228799009 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:38 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-6h4s2.spec.containers{router} Created Created container: router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:38 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-6h4s2.spec.containers{router} Started Started container router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:38 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-q4p9p.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:38 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-q4p9p.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router8.0" in 137ms (137ms including waiting). Image size: 228799009 bytes. 
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:38 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-q4p9p.spec.containers{router} Created Created container: router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:38 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-q4p9p.spec.containers{router} Started Started container router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:39 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-mg69q.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:39 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-mg69q.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router8.0" in 151ms (151ms including waiting). Image size: 228799009 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:39 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-mg69q.spec.containers{router} Created Created container: router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:38:39 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-mg69q.spec.containers{router} Started Started container router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:04 +0000 UTC Normal Pod xb-gr-demand-backup-gcp-gcp-cs-5nbq9 Binding Scheduled Successfully assigned kuttl-test-alive-sponge/xb-gr-demand-backup-gcp-gcp-cs-5nbq9 to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-9kgt default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:04 +0000 UTC Normal Pod xb-gr-demand-backup-gcp-gcp-cs-5nbq9.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:04 +0000 UTC Normal Job.batch xb-gr-demand-backup-gcp-gcp-cs SuccessfulCreate Created pod: xb-gr-demand-backup-gcp-gcp-cs-5nbq9 job-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:05 +0000 UTC Normal Pod xb-gr-demand-backup-gcp-gcp-cs-5nbq9.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 152ms (152ms including waiting). Image size: 108991485 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:05 +0000 UTC Normal Pod xb-gr-demand-backup-gcp-gcp-cs-5nbq9.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:05 +0000 UTC Normal Pod xb-gr-demand-backup-gcp-gcp-cs-5nbq9.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:07 +0000 UTC Normal Pod xb-gr-demand-backup-gcp-gcp-cs-5nbq9.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:07 +0000 UTC Normal Pod xb-gr-demand-backup-gcp-gcp-cs-5nbq9.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 201ms (201ms including waiting). Image size: 428809836 bytes. 
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:07 +0000 UTC Normal Pod xb-gr-demand-backup-gcp-gcp-cs-5nbq9.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:07 +0000 UTC Normal Pod xb-gr-demand-backup-gcp-gcp-cs-5nbq9.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:13 +0000 UTC Normal Job.batch xb-gr-demand-backup-gcp-gcp-cs Completed Job completed job-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:30 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:30 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:31 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-6h4s2.spec.containers{router} Killing Stopping container router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:31 +0000 UTC Warning Pod gr-demand-backup-router-777cbbbc58-6h4s2.spec.containers{router} Unhealthy Readiness probe errored: rpc error: code = Unknown desc = failed to exec in container: container is in CONTAINER_EXITED state kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:31 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-mg69q.spec.containers{router} Killing Stopping container router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:31 +0000 UTC Warning Pod gr-demand-backup-router-777cbbbc58-mg69q.spec.containers{router} Unhealthy Readiness probe errored: rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 2b882a63b6cb3d384127b8e60feee310077237b647f629351da25940377678cc not found: not found kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:31 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-q4p9p.spec.containers{router} Killing Stopping container router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:31 +0000 UTC Normal ReplicaSet.apps gr-demand-backup-router-777cbbbc58 SuccessfulDelete Deleted pod: gr-demand-backup-router-777cbbbc58-mg69q replicaset-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:31 +0000 UTC Normal ReplicaSet.apps gr-demand-backup-router-777cbbbc58 SuccessfulDelete Deleted pod: gr-demand-backup-router-777cbbbc58-q4p9p replicaset-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:31 +0000 UTC Normal ReplicaSet.apps gr-demand-backup-router-777cbbbc58 SuccessfulDelete Deleted pod: gr-demand-backup-router-777cbbbc58-6h4s2 replicaset-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:34 +0000 UTC Warning Pod gr-demand-backup-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/08/13 19:39:34 MySQL state is not ready... 
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:38 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:38 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:44 +0000 UTC Warning Pod gr-demand-backup-mysql-1.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/08/13 19:39:44 MySQL state is not ready... kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:46 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:46 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:51 +0000 UTC Warning Pod gr-demand-backup-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/08/13 19:39:51 MySQL state is not ready... kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:55 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-gcp-mk4g7 Binding Scheduled Successfully assigned kuttl-test-alive-sponge/xb-restore-gr-demand-backup-restore-gcp-mk4g7 to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-w8m4 default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:55 +0000 UTC Warning Pod xb-restore-gr-demand-backup-restore-gcp-mk4g7 FailedAttachVolume Multi-Attach error for volume "pvc-6de198cc-51dd-4462-a5ca-d2c03aa7287f" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:39:55 +0000 UTC Normal Job.batch xb-restore-gr-demand-backup-restore-gcp SuccessfulCreate Created pod: xb-restore-gr-demand-backup-restore-gcp-mk4g7 job-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:13 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-gcp-mk4g7 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-6de198cc-51dd-4462-a5ca-d2c03aa7287f" attachdetach-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:14 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-gcp-mk4g7.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:14 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-gcp-mk4g7.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 225ms (225ms including waiting). Image size: 108991485 bytes. 
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:14 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-gcp-mk4g7.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:15 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-gcp-mk4g7.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:16 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-gcp-mk4g7.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:17 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-gcp-mk4g7.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 168ms (168ms including waiting). Image size: 428809836 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:17 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-gcp-mk4g7.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:17 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-gcp-mk4g7.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:32 +0000 UTC Normal Pod gr-demand-backup-mysql-0 Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-mysql-0 to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-9kgt default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:32 +0000 UTC Warning Pod gr-demand-backup-mysql-0 FailedAttachVolume Multi-Attach error for volume "pvc-6de198cc-51dd-4462-a5ca-d2c03aa7287f" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:32 +0000 UTC Normal Job.batch xb-restore-gr-demand-backup-restore-gcp Completed Job completed job-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:51 +0000 UTC Normal Pod gr-demand-backup-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-6de198cc-51dd-4462-a5ca-d2c03aa7287f" attachdetach-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:52 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:52 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 169ms (169ms including waiting). Image size: 108991485 bytes. 
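[editor] The FailedAttachVolume "Multi-Attach" warnings above are transient: the restore job and the recreated gr-demand-backup-mysql-0 pod each reuse pvc-6de198cc-51dd-4462-a5ca-d2c03aa7287f, and the PD CSI volume attaches only after it detaches from the previous node (about 20 seconds here, 19:39:55 to 19:40:13 and 19:40:32 to 19:40:51). A hedged way to watch the detach/attach cycle from outside the test, assuming kubectl access to the same cluster:

# Cluster-scoped attachment objects show which node currently holds the volume.
kubectl get volumeattachments -o wide
# Filter the namespace's events down to the attach failures.
kubectl -n kuttl-test-alive-sponge get events --field-selector reason=FailedAttachVolume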
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:53 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:53 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:54 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:54 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 165ms (165ms including waiting). Image size: 437177392 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:54 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:54 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:54 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:54 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 215ms (215ms including waiting). Image size: 428809836 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:54 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:40:54 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:41:27 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:41:28 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:41:28 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-alive-sponge/datadir-gr-demand-backup-mysql-1" pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31 logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:41:31 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-be376f89-fd68-4b7b-86f2-0752e0998d77 pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31 logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:41:32 +0000 UTC Normal Pod gr-demand-backup-mysql-1 Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-mysql-1 to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-w8m4 default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:41:39 +0000 UTC Normal Pod gr-demand-backup-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-be376f89-fd68-4b7b-86f2-0752e0998d77" attachdetach-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:41:40 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:41:41 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 197ms (197ms including waiting). Image size: 108991485 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:41:41 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:41:41 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:41:43 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:41:43 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 198ms (198ms including waiting). Image size: 437177392 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:41:43 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:41:43 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:41:43 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:41:43 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 153ms (153ms including waiting). Image size: 428809836 bytes. 
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:41:43 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:41:43 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:42:13 +0000 UTC Warning Pod gr-demand-backup-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/08/13 19:42:00 Waiting for MySQL ready state 2025/08/13 19:42:00 MySQL is ready 2025/08/13 19:42:00 Starting bootstrap... 2025/08/13 19:42:01 mysql-shell version: 8.0.42 2025/08/13 19:42:01 Running dba.configureLocalInstance('operator:*****@gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge', {'clearReadOnly': true}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The clearReadOnly option is deprecated and will be removed in a future release. WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead. Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster... This instance reports its own address as gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 applierWorkerThreads will be set to the default value of 4. NOTE: Some configuration options need to be fixed: +----------------------------------------+---------------+----------------+----------------------------+ | Variable | Current Value | Required Value | Note |+----------------------------------------+---------------+----------------+----------------------------+ | binlog_transaction_dependency_tracking | COMMIT_ORDER | WRITESET | Update the server variable | +----------------------------------------+---------------+----------------+----------------------------+ Disabled super_read_only on the instance 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' Enabling super_read_only on the instance 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' Configuring instance... WARNING:*****@binlog_transaction_dependency_tracking' is deprecated and will be removed in a future release. (Code 1287). The instance 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' was configured to be used in an InnoDB cluster. 2025/08/13 19:42:01 Instance (gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge) configured to join to the InnoDB cluster 2025/08/13 19:42:01 peers: [gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge] 2025/08/13 19:42:01 Running dba.getCluster('grdemandbackup') Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. 2025/08/13 19:42:01 Connected to peer gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge 2025/08/13 19:42:01 Cluster status: ClusterName: grdemandbackup Status: OK_NO_TOLERANCE StatusText: Cluster is NOT tolerant to any failures. 
SSL: REQUIRED Primary: gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 Topology: Member 0 Address: gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 State: ONLINE Errors: [] 2025/08/13 19:42:01 Adding instance (gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge) to InnoDB cluster 2025/08/13 19:42:01 Running dba.getCluster('grdemandbackup').addInstance('operator:*****@gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge', {'recoveryMethod': 'clone', 'waitRecovery': 3}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead. NOTE: A GTID set check of the MySQL instance at 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' determined that it is missing transactions that were purged from all cluster members. NOTE: The target instance 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' has not been pre-provisioned (GTID set is empty). The Shell is unable to determine whether the instance has pre-existing data that would be overwritten with clone based recovery. Clone based recovery selected through the recoveryMethod option Validating instance configuration at gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306... This instance reports its own address as gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 Instance configuration is suitable. NOTE: Group Replication will communicate with other members using 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306'. Use the localAddress option to override. * Checking connectivity and SSL configuration... A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours. Adding instance to the cluster... Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background. Clone based state recovery is now in progress. NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back. * Waiting for clone to finish... NOTE: gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 is being cloned from gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 ** Stage DROP DATA: Completed ** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed NOTE: gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 is shutting down... * Waiting for server restart... \ * Waiting for server restart... | * Waiting for server restart... / * Waiting for server restart... 
- kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:42:13 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:42:13 +0000 UTC Warning Pod gr-demand-backup-mysql-1.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:42:13 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 154ms (154ms including waiting). Image size: 437177392 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:42:45 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:42:46 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:42:46 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-alive-sponge/datadir-gr-demand-backup-mysql-2" pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31 logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:42:49 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-eecdfeba-f455-4631-baf5-53f50309d891 pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31 logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:42:50 +0000 UTC Normal Pod gr-demand-backup-mysql-2 Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-mysql-2 to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-x2h6 default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:42:57 +0000 UTC Normal Pod gr-demand-backup-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-eecdfeba-f455-4631-baf5-53f50309d891" attachdetach-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:42:58 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:42:59 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 180ms (180ms including waiting). Image size: 108991485 bytes. 
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:42:59 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:42:59 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:43:00 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:43:00 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 162ms (162ms including waiting). Image size: 437177392 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:43:00 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:43:00 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:43:00 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:43:01 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 207ms (207ms including waiting). Image size: 428809836 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:43:01 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:43:01 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:43:30 +0000 UTC Warning Pod gr-demand-backup-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/08/13 19:43:18 Waiting for MySQL ready state 2025/08/13 19:43:18 MySQL is ready 2025/08/13 19:43:18 Starting bootstrap... 2025/08/13 19:43:19 mysql-shell version: 8.0.42 2025/08/13 19:43:19 Running dba.configureLocalInstance('operator:*****@gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge', {'clearReadOnly': true}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The clearReadOnly option is deprecated and will be removed in a future release. WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead. Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster... This instance reports its own address as gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 applierWorkerThreads will be set to the default value of 4. 
NOTE: Some configuration options need to be fixed: +----------------------------------------+---------------+----------------+----------------------------+ | Variable | Current Value | Required Value | Note |+----------------------------------------+---------------+----------------+----------------------------+ | binlog_transaction_dependency_tracking | COMMIT_ORDER | WRITESET | Update the server variable | +----------------------------------------+---------------+----------------+----------------------------+ Disabled super_read_only on the instance 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' Enabling super_read_only on the instance 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' Configuring instance... WARNING:*****@binlog_transaction_dependency_tracking' is deprecated and will be removed in a future release. (Code 1287). The instance 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' was configured to be used in an InnoDB cluster. 2025/08/13 19:43:19 Instance (gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge) configured to join to the InnoDB cluster 2025/08/13 19:43:19 peers: [gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge] 2025/08/13 19:43:19 Running dba.getCluster('grdemandbackup') Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. 2025/08/13 19:43:19 Connected to peer gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge 2025/08/13 19:43:19 Cluster status: ClusterName: grdemandbackup Status: OK_NO_TOLERANCE StatusText: Cluster is NOT tolerant to any failures. SSL: REQUIRED Primary: gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 Topology: Member 0 Address: gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 State: ONLINE Errors: [] Member 1 Address: gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 State: ONLINE Errors: [] 2025/08/13 19:43:19 Adding instance (gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge) to InnoDB cluster 2025/08/13 19:43:19 Running dba.getCluster('grdemandbackup').addInstance('operator:*****@gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge', {'recoveryMethod': 'clone', 'waitRecovery': 3}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead. NOTE: A GTID set check of the MySQL instance at 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' determined that it is missing transactions that were purged from all cluster members. NOTE: The target instance 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' has not been pre-provisioned (GTID set is empty). The Shell is unable to determine whether the instance has pre-existing data that would be overwritten with clone based recovery. Clone based recovery selected through the recoveryMethod option Validating instance configuration at gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306... 
This instance reports its own address as gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 Instance configuration is suitable. NOTE: Group Replication will communicate with other members using 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306'. Use the localAddress option to override. * Checking connectivity and SSL configuration... A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours. Adding instance to the cluster... Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background. Clone based state recovery is now in progress. NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back. * Waiting for clone to finish... NOTE: gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 is being cloned from gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 ** Stage DROP DATA: Completed ** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed NOTE: gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 is shutting down... * Waiting for server restart... \ * Waiting for server restart... | * Waiting for server restart... / * Waiting for server restart... - kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:43:30 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:43:30 +0000 UTC Warning Pod gr-demand-backup-mysql-2.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:43:30 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 178ms (178ms including waiting). Image size: 437177392 bytes. 
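The probe output above spells out the operator's whole join sequence: configure the local instance, connect to a healthy peer, and add the instance with clone-based recovery. A minimal sketch of those same MySQL Shell calls, runnable by hand in this namespace (the URIs, cluster name, and options are taken verbatim from the log; the mysqlsh batch-mode invocation itself is an assumption, not the operator's exact command line):

    # Sketch: replay the bootstrap calls from the probe log above.
    # Assumes mysqlsh 8.x on PATH and reachable pod DNS; password elided like the log's '*****'.
    TARGET='operator:<password>@gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge'
    PEER='operator:<password>@gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge'
    # Step 1: prepare the local instance (deprecated call, kept exactly as the log shows it):
    mysqlsh --js -e "dba.configureLocalInstance('${TARGET}', {clearReadOnly: true})"
    # Step 2: join it to the running cluster with clone recovery, via the peer:
    mysqlsh --js --uri "${PEER}" -e "dba.getCluster('grdemandbackup').addInstance('${TARGET}', {recoveryMethod: 'clone', waitRecovery: 3})"

The clone stage ends with a server RESTART, which is why the kubelet's startup probe fails and the mysql container is restarted immediately after this output.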
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:13 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-gblgl Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-router-777cbbbc58-gblgl to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-9kgt default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:13 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-pslwt Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-router-777cbbbc58-pslwt to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-x2h6 default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:13 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-wjx5z Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-router-777cbbbc58-wjx5z to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-w8m4 default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:13 +0000 UTC Normal ReplicaSet.apps gr-demand-backup-router-777cbbbc58 SuccessfulCreate (combined from similar events): Created pod: gr-demand-backup-router-777cbbbc58-88kzn logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:14 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-gblgl.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:14 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-gblgl.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 222ms (222ms including waiting). Image size: 108991485 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:14 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-gblgl.spec.initContainers{router-init} Created Created container: router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:14 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-gblgl.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:14 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-pslwt.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:14 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-pslwt.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 182ms (182ms including waiting). Image size: 108991485 bytes. 
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:14 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-pslwt.spec.initContainers{router-init} Created Created container: router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:14 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-pslwt.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:14 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-wjx5z.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:14 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-wjx5z.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 219ms (219ms including waiting). Image size: 108991485 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:14 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-wjx5z.spec.initContainers{router-init} Created Created container: router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:14 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-wjx5z.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:15 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-pslwt.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:16 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-gblgl.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:16 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-gblgl.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router8.0" in 176ms (176ms including waiting). Image size: 228799009 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:16 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-gblgl.spec.containers{router} Created Created container: router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:16 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-gblgl.spec.containers{router} Started Started container router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:16 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-pslwt.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router8.0" in 189ms (189ms including waiting). Image size: 228799009 bytes. 
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:16 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-pslwt.spec.containers{router} Created Created container: router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:16 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-pslwt.spec.containers{router} Started Started container router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:16 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-wjx5z.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:16 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-wjx5z.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router8.0" in 167ms (167ms including waiting). Image size: 228799009 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:16 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-wjx5z.spec.containers{router} Created Created container: router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:16 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-wjx5z.spec.containers{router} Started Started container router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:41 +0000 UTC Normal Pod xb-gr-demand-backup-azure-azure-blob-f9gx6 Binding Scheduled Successfully assigned kuttl-test-alive-sponge/xb-gr-demand-backup-azure-azure-blob-f9gx6 to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-9kgt default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:41 +0000 UTC Normal Pod xb-gr-demand-backup-azure-azure-blob-f9gx6.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:41 +0000 UTC Normal Job.batch xb-gr-demand-backup-azure-azure-blob SuccessfulCreate Created pod: xb-gr-demand-backup-azure-azure-blob-f9gx6 job-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:42 +0000 UTC Normal Pod xb-gr-demand-backup-azure-azure-blob-f9gx6.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 203ms (203ms including waiting). Image size: 108991485 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:42 +0000 UTC Normal Pod xb-gr-demand-backup-azure-azure-blob-f9gx6.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:42 +0000 UTC Normal Pod xb-gr-demand-backup-azure-azure-blob-f9gx6.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:43 +0000 UTC Normal Pod xb-gr-demand-backup-azure-azure-blob-f9gx6.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:43 +0000 UTC Normal Pod xb-gr-demand-backup-azure-azure-blob-f9gx6.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 206ms (206ms including waiting). Image size: 428809836 bytes. 
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:43 +0000 UTC Normal Pod xb-gr-demand-backup-azure-azure-blob-f9gx6.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:43 +0000 UTC Normal Pod xb-gr-demand-backup-azure-azure-blob-f9gx6.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:44:49 +0000 UTC Normal Job.batch xb-gr-demand-backup-azure-azure-blob Completed Job completed job-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:06 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:06 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:07 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-gblgl.spec.containers{router} Killing Stopping container router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:07 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-pslwt.spec.containers{router} Killing Stopping container router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:07 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-wjx5z.spec.containers{router} Killing Stopping container router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:07 +0000 UTC Normal ReplicaSet.apps gr-demand-backup-router-777cbbbc58 SuccessfulDelete (combined from similar events): Deleted pod: gr-demand-backup-router-777cbbbc58-gblgl logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:14 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:14 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:20 +0000 UTC Warning Pod gr-demand-backup-mysql-1.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/08/13 19:45:20 MySQL state is not ready... kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:22 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:22 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:27 +0000 UTC Warning Pod gr-demand-backup-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/08/13 19:45:27 MySQL state is not ready... 
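With the Azure backup job complete, the operator stops the routers and all three mysql pods before launching the restore job that follows; the readiness failures here are just the members shutting down in order. To follow the same backup/restore pair outside the harness, watching the custom resources is usually enough (namespace and job name are from the log; ps-backup and ps-restore are the short names this operator's CRDs are expected to register, so fall back to the full CRD names if they differ):

    # Sketch: track the demand backup and the restore that replays it.
    NS=kuttl-test-alive-sponge
    kubectl -n "$NS" get ps-backup          # backup CRs; the azure one feeds the restore below
    kubectl -n "$NS" get ps-restore -w      # restore CR state mirrors job xb-restore-...-azure
    kubectl -n "$NS" logs job/xb-restore-gr-demand-backup-restore-azure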
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:31 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-azure-6fr9f Binding Scheduled Successfully assigned kuttl-test-alive-sponge/xb-restore-gr-demand-backup-restore-azure-6fr9f to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-w8m4 default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:31 +0000 UTC Warning Pod xb-restore-gr-demand-backup-restore-azure-6fr9f FailedAttachVolume Multi-Attach error for volume "pvc-6de198cc-51dd-4462-a5ca-d2c03aa7287f" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:31 +0000 UTC Normal Job.batch xb-restore-gr-demand-backup-restore-azure SuccessfulCreate Created pod: xb-restore-gr-demand-backup-restore-azure-6fr9f job-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:54 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-azure-6fr9f SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-6de198cc-51dd-4462-a5ca-d2c03aa7287f" attachdetach-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:55 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-azure-6fr9f.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:55 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-azure-6fr9f.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 232ms (232ms including waiting). Image size: 108991485 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:55 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-azure-6fr9f.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:55 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-azure-6fr9f.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:57 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-azure-6fr9f.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:57 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-azure-6fr9f.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 204ms (204ms including waiting). Image size: 428809836 bytes. 
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:57 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-azure-6fr9f.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:45:57 +0000 UTC Normal Pod xb-restore-gr-demand-backup-restore-azure-6fr9f.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:46:11 +0000 UTC Normal Job.batch xb-restore-gr-demand-backup-restore-azure Completed Job completed job-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:46:12 +0000 UTC Normal Pod gr-demand-backup-mysql-0 Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-mysql-0 to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-9kgt default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:46:12 +0000 UTC Warning Pod gr-demand-backup-mysql-0 FailedAttachVolume Multi-Attach error for volume "pvc-6de198cc-51dd-4462-a5ca-d2c03aa7287f" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:46:33 +0000 UTC Normal Pod gr-demand-backup-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-6de198cc-51dd-4462-a5ca-d2c03aa7287f" attachdetach-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:46:35 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:46:35 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 230ms (230ms including waiting). Image size: 108991485 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:46:35 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:46:35 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:46:36 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:46:37 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 178ms (178ms including waiting). Image size: 437177392 bytes. 
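The FailedAttachVolume/Multi-Attach warnings in this stretch are expected with ReadWriteOnce GCE persistent disks: the restore job and then the recreated gr-demand-backup-mysql-0 are scheduled onto different nodes, and each must wait for the disk to detach from the node that last held it before AttachVolume.Attach can succeed (roughly 20 seconds in both cases above). If such a wait ever stalls, the stale attachment can be located directly; the volume name below is the one from these events:

    # Sketch: find which node still holds a ReadWriteOnce volume during a Multi-Attach wait.
    PV=pvc-6de198cc-51dd-4462-a5ca-d2c03aa7287f
    kubectl get volumeattachments | grep "$PV"   # ATTACHED=true rows show the node the disk must detach from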
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:46:37 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:46:37 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:46:37 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:46:37 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 184ms (184ms including waiting). Image size: 428809836 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:46:37 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:46:37 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:10 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:10 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:10 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-alive-sponge/datadir-gr-demand-backup-mysql-1" pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31 logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:14 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-bfbce91e-109a-41e0-a486-91dc4adcdeb6 pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31 logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:14 +0000 UTC Normal Pod gr-demand-backup-mysql-1 Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-mysql-1 to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-w8m4 default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:25 +0000 UTC Normal Pod gr-demand-backup-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-bfbce91e-109a-41e0-a486-91dc4adcdeb6" attachdetach-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:26 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:27 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 194ms (194ms including waiting). Image size: 108991485 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:27 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:27 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:28 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:28 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 160ms (160ms including waiting). Image size: 437177392 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:28 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:28 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:28 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:29 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 176ms (176ms including waiting). Image size: 428809836 bytes. 
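The datadir-gr-demand-backup-mysql-1 claim shows the normal WaitForFirstConsumer sequence: the PVC stays pending until a pod needs it, the pd.csi.storage.gke.io provisioner then creates the disk, and scheduling plus attachment follow within seconds. A quick way to confirm a storage class binds this way (generic kubectl; nothing beyond the log's provisioner name is assumed):

    # Sketch: list binding modes, expecting WaitForFirstConsumer on the GKE PD CSI classes.
    kubectl get sc -o custom-columns=NAME:.metadata.name,PROVISIONER:.provisioner,BINDING:.volumeBindingMode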
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:29 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:29 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:58 +0000 UTC Warning Pod gr-demand-backup-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/08/13 19:47:46 Waiting for MySQL ready state 2025/08/13 19:47:46 MySQL is ready 2025/08/13 19:47:46 Starting bootstrap... 2025/08/13 19:47:47 mysql-shell version: 8.0.42 2025/08/13 19:47:47 Running dba.configureLocalInstance('operator:*****@gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge', {'clearReadOnly': true}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The clearReadOnly option is deprecated and will be removed in a future release. WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead. Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster... This instance reports its own address as gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 applierWorkerThreads will be set to the default value of 4. NOTE: Some configuration options need to be fixed: +----------------------------------------+---------------+----------------+----------------------------+ | Variable | Current Value | Required Value | Note | +----------------------------------------+---------------+----------------+----------------------------+ | binlog_transaction_dependency_tracking | COMMIT_ORDER | WRITESET | Update the server variable | +----------------------------------------+---------------+----------------+----------------------------+ Disabled super_read_only on the instance 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' Enabling super_read_only on the instance 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' Configuring instance... WARNING:*****@binlog_transaction_dependency_tracking' is deprecated and will be removed in a future release. (Code 1287). The instance 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' was configured to be used in an InnoDB cluster. 2025/08/13 19:47:47 Instance (gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge) configured to join to the InnoDB cluster 2025/08/13 19:47:47 peers: [gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge] 2025/08/13 19:47:47 Running dba.getCluster('grdemandbackup') Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. 2025/08/13 19:47:47 Connected to peer gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge 2025/08/13 19:47:47 Cluster status: ClusterName: grdemandbackup Status: OK_NO_TOLERANCE StatusText: Cluster is NOT tolerant to any failures. 
SSL: REQUIRED Primary: gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 Topology: Member 0 Address: gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 State: ONLINE Errors: [] 2025/08/13 19:47:47 Adding instance (gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge) to InnoDB cluster 2025/08/13 19:47:47 Running dba.getCluster('grdemandbackup').addInstance('operator:*****@gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge', {'recoveryMethod': 'clone', 'waitRecovery': 3}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead. NOTE: A GTID set check of the MySQL instance at 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' determined that it is missing transactions that were purged from all cluster members. NOTE: The target instance 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' has not been pre-provisioned (GTID set is empty). The Shell is unable to determine whether the instance has pre-existing data that would be overwritten with clone based recovery. Clone based recovery selected through the recoveryMethod option Validating instance configuration at gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306... This instance reports its own address as gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 Instance configuration is suitable. NOTE: Group Replication will communicate with other members using 'gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306'. Use the localAddress option to override. * Checking connectivity and SSL configuration... A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours. Adding instance to the cluster... Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background. Clone based state recovery is now in progress. NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back. * Waiting for clone to finish... NOTE: gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 is being cloned from gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 ** Stage DROP DATA: Completed ** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed NOTE: gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 is shutting down... * Waiting for server restart... \ * Waiting for server restart... | * Waiting for server restart... 
/ kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:58 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:58 +0000 UTC Warning Pod gr-demand-backup-mysql-1.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:47:58 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 201ms (201ms including waiting). Image size: 437177392 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:48:32 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:48:32 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:48:32 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-alive-sponge/datadir-gr-demand-backup-mysql-2" pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31 logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:48:35 +0000 UTC Normal PersistentVolumeClaim datadir-gr-demand-backup-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-5166467a-7085-4a0b-99c9-ec8391b74ded pd.csi.storage.gke.io_gke-70c313d695cf4ab8baf0-ad38-16fd-vm_d062077b-83b0-43c6-b696-7df8dbc85a31 logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:48:36 +0000 UTC Normal Pod gr-demand-backup-mysql-2 Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-mysql-2 to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-x2h6 default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:48:43 +0000 UTC Normal Pod gr-demand-backup-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-5166467a-7085-4a0b-99c9-ec8391b74ded" attachdetach-controller logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:48:44 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:48:45 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 216ms (216ms including waiting). Image size: 108991485 bytes. 
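As with mysql-2 earlier, mysql-1's "failed startup probe, will be restarted" plus FailedPreStopHook is the expected tail of clone recovery: the clone finishes with a server RESTART, the kubelet sees the mysql container die mid-probe, and restarting it is part of the join rather than a fault. Whether a slow clone would outlive that tolerance is governed by the startup probe budget, which can be read straight off the pod (pod and container names from the log; standard kubectl jsonpath):

    # Sketch: read the startup probe budget on a joining member.
    NS=kuttl-test-alive-sponge
    kubectl -n "$NS" get pod gr-demand-backup-mysql-1 \
      -o jsonpath='{.spec.containers[?(@.name=="mysql")].startupProbe}'
    # failureThreshold x periodSeconds bounds how long clone + restart may take.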
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:48:45 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:48:45 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:48:47 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:48:47 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 218ms (218ms including waiting). Image size: 437177392 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:48:47 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:48:47 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:48:47 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:48:47 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 162ms (162ms including waiting). Image size: 428809836 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:48:47 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:48:47 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:49:16 +0000 UTC Warning Pod gr-demand-backup-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/08/13 19:49:04 Waiting for MySQL ready state 2025/08/13 19:49:04 MySQL is ready 2025/08/13 19:49:04 Starting bootstrap... 2025/08/13 19:49:05 mysql-shell version: 8.0.42 2025/08/13 19:49:05 Running dba.configureLocalInstance('operator:*****@gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge', {'clearReadOnly': true}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The clearReadOnly option is deprecated and will be removed in a future release. WARNING: This function is deprecated and will be removed in a future release of MySQL Shell, use dba.configureInstance() instead. Configuring local MySQL instance listening at port 3306 for use in an InnoDB cluster... This instance reports its own address as gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 applierWorkerThreads will be set to the default value of 4. 
NOTE: Some configuration options need to be fixed: +----------------------------------------+---------------+----------------+----------------------------+ | Variable | Current Value | Required Value | Note |+----------------------------------------+---------------+----------------+----------------------------+ | binlog_transaction_dependency_tracking | COMMIT_ORDER | WRITESET | Update the server variable | +----------------------------------------+---------------+----------------+----------------------------+ Disabled super_read_only on the instance 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' Enabling super_read_only on the instance 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' Configuring instance... WARNING:*****@binlog_transaction_dependency_tracking' is deprecated and will be removed in a future release. (Code 1287). The instance 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' was configured to be used in an InnoDB cluster. 2025/08/13 19:49:05 Instance (gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge) configured to join to the InnoDB cluster 2025/08/13 19:49:05 peers: [gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge] 2025/08/13 19:49:05 Running dba.getCluster('grdemandbackup') Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. 2025/08/13 19:49:05 Connected to peer gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge 2025/08/13 19:49:06 Cluster status: ClusterName: grdemandbackup Status: OK_NO_TOLERANCE StatusText: Cluster is NOT tolerant to any failures. SSL: REQUIRED Primary: gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 Topology: Member 0 Address: gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 State: ONLINE Errors: [] Member 1 Address: gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 State: ONLINE Errors: [] 2025/08/13 19:49:06 Adding instance (gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge) to InnoDB cluster 2025/08/13 19:49:06 Running dba.getCluster('grdemandbackup').addInstance('operator:*****@gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge', {'recoveryMethod': 'clone', 'waitRecovery': 3}) Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory WARNING: Using a password on the command line interface can be insecure. WARNING: The waitRecovery option is deprecated. Please use the recoveryProgress option instead. NOTE: A GTID set check of the MySQL instance at 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' determined that it is missing transactions that were purged from all cluster members. NOTE: The target instance 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306' has not been pre-provisioned (GTID set is empty). The Shell is unable to determine whether the instance has pre-existing data that would be overwritten with clone based recovery. Clone based recovery selected through the recoveryMethod option Validating instance configuration at gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306... 
This instance reports its own address as gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 Instance configuration is suitable. NOTE: Group Replication will communicate with other members using 'gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306'. Use the localAddress option to override. * Checking connectivity and SSL configuration... A new instance will be added to the InnoDB Cluster. Depending on the amount of data on the cluster this might take from a few seconds to several hours. Adding instance to the cluster... Monitoring recovery process of the new cluster member. Press ^C to stop monitoring and let it continue in background. Clone based state recovery is now in progress. NOTE: A server restart is expected to happen as part of the clone process. If the server does not support the RESTART command or does not come back after a while, you may need to manually start it back. * Waiting for clone to finish... NOTE: gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 is being cloned from gr-demand-backup-mysql-1.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 ** Stage DROP DATA: Completed ** Clone Transfer FILE COPY 100% Completed PAGE COPY 100% Completed REDO COPY 100% Completed NOTE: gr-demand-backup-mysql-2.gr-demand-backup-mysql.kuttl-test-alive-sponge:3306 is shutting down... * Waiting for server restart... \ * Waiting for server restart... | * Waiting for server restart... / kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:49:16 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:49:16 +0000 UTC Warning Pod gr-demand-backup-mysql-2.spec.containers{mysql} FailedPreStopHook PreStopHook failed kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:49:16 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 160ms (160ms including waiting). Image size: 437177392 bytes. 
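After this final restart the third member rejoins, and the cluster should move from the OK_NO_TOLERANCE seen in every status dump above to OK with three ONLINE members. A one-liner to verify, reusing the account and cluster name from the log (password elided as before):

    # Sketch: confirm the InnoDB cluster is back to full tolerance after recovery.
    PEER='operator:<password>@gr-demand-backup-mysql-0.gr-demand-backup-mysql.kuttl-test-alive-sponge'
    mysqlsh --js --uri "$PEER" -e "print(dba.getCluster('grdemandbackup').status())"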
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:00 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-88kzn Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-router-777cbbbc58-88kzn to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-x2h6 default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:00 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-88kzn.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:00 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-jxchm Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-router-777cbbbc58-jxchm to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-9kgt default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:00 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-pcchc Binding Scheduled Successfully assigned kuttl-test-alive-sponge/gr-demand-backup-router-777cbbbc58-pcchc to gke-jen-ps-986-fb6e2fa0--default-pool-1c4ad124-w8m4 default-scheduler logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:00 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-pcchc.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:01 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-88kzn.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 213ms (213ms including waiting). Image size: 108991485 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:01 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-88kzn.spec.initContainers{router-init} Created Created container: router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:01 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-88kzn.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:01 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-jxchm.spec.initContainers{router-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:01 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-jxchm.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 214ms (214ms including waiting). Image size: 108991485 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:01 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-jxchm.spec.initContainers{router-init} Created Created container: router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:01 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-jxchm.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:01 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-pcchc.spec.initContainers{router-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-986-fb6e2fa0" in 206ms (206ms including waiting). Image size: 108991485 bytes. 
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:01 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-pcchc.spec.initContainers{router-init} Created Created container: router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:01 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-pcchc.spec.initContainers{router-init} Started Started container router-init kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:02 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-pcchc.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:03 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-88kzn.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:03 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-88kzn.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router8.0" in 187ms (187ms including waiting). Image size: 228799009 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:03 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-88kzn.spec.containers{router} Created Created container: router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:03 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-88kzn.spec.containers{router} Started Started container router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:03 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-jxchm.spec.containers{router} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-router8.0" kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:03 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-jxchm.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router8.0" in 207ms (207ms including waiting). Image size: 228799009 bytes. kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:03 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-jxchm.spec.containers{router} Created Created container: router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:03 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-jxchm.spec.containers{router} Started Started container router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:03 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-pcchc.spec.containers{router} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-router8.0" in 192ms (192ms including waiting). Image size: 228799009 bytes. 
kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:03 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-pcchc.spec.containers{router} Created Created container: router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:03 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-pcchc.spec.containers{router} Started Started container router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:28 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:28 +0000 UTC Normal Pod gr-demand-backup-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:28 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:28 +0000 UTC Normal Pod gr-demand-backup-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:28 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:28 +0000 UTC Normal Pod gr-demand-backup-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:28 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-88kzn.spec.containers{router} Killing Stopping container router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:28 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-jxchm.spec.containers{router} Killing Stopping container router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:28 +0000 UTC Normal Pod gr-demand-backup-router-777cbbbc58-pcchc.spec.containers{router} Killing Stopping container router kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:29 +0000 UTC Warning Pod gr-demand-backup-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/08/13 19:50:29 MySQL state is not ready... kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:31 +0000 UTC Warning Pod gr-demand-backup-mysql-1.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/08/13 19:50:31 MySQL state is not ready... kubelet logger.go:42: 19:50:37 | gr-demand-backup | 2025-08-13 19:50:34 +0000 UTC Warning Pod gr-demand-backup-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/08/13 19:50:34 MySQL state is not ready... kubelet logger.go:42: 19:50:39 | gr-demand-backup | Deleting namespace: kuttl-test-alive-sponge === NAME kuttl harness.go:403: run tests finished harness.go:510: cleaning up harness.go:567: removing temp folder: "" --- PASS: kuttl (1785.56s) --- PASS: kuttl/harness (0.00s) --- PASS: kuttl/harness/gr-demand-backup (1784.77s) PASS
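Despite the expected probe noise along the way, the test passed end to end in just under 30 minutes. A single test from the suite can be rerun in isolation with stock kuttl flags (the suite path is an assumption matching this repo's layout; --test filters by test name):

    # Sketch: rerun only this test against the current kubeconfig.
    kubectl kuttl test ./e2e-tests/tests --test gr-demand-backup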