=== RUN kuttl
harness.go:464: starting setup
harness.go:255: running tests using configured kubeconfig.
harness.go:278: Successful connection to cluster at: https://34.42.11.52
harness.go:363: running tests
harness.go:75: going to run test suite with timeout of 180 seconds for each step
harness.go:375: testsuite: e2e-tests/tests has 34 tests
=== RUN kuttl/harness
=== RUN kuttl/harness/demand-backup
=== PAUSE kuttl/harness/demand-backup
=== CONT kuttl/harness/demand-backup
logger.go:42: 19:00:22 | demand-backup | Creating namespace: kuttl-test-relative-reindeer
logger.go:42: 19:00:22 | demand-backup/0-minio-secret | starting test step 0-minio-secret
logger.go:42: 19:00:22 | demand-backup/0-minio-secret | Secret:kuttl-test-relative-reindeer/minio-secret created
logger.go:42: 19:00:23 | demand-backup/0-minio-secret | test step completed 0-minio-secret
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | starting test step 1-deploy-operator
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
init_temp_dir # do this only in the first TestStep
apply_s3_storage_secrets
deploy_operator
deploy_non_tls_cluster_secrets
deploy_tls_cluster_secrets
deploy_client
deploy_minio]
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | + source ../../functions
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ realpath ../../..
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | ++++ pwd
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/tests/demand-backup
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | ++ test_name=demand-backup
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/vars.sh
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ export GIT_BRANCH=PR-893
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ GIT_BRANCH=PR-893
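The identical +++ dumps that open this and every later step come from each step re-sourcing e2e-tests/functions, which sources e2e-tests/vars.sh; every path is derived from the repository root. A minimal sketch of that wiring, reconstructed from the traced values only (the actual vars.sh may differ):

    # Reconstructed from the traced assignments; not copied from the repo.
    ROOT_REPO=$(realpath ../../..)   # /mnt/jenkins/workspace/cloud-ps-operator_PR-893
    test_name=$(basename "$(pwd)")   # demand-backup
    export DEPLOY_DIR="${ROOT_REPO}/deploy"
    export TESTS_DIR="${ROOT_REPO}/e2e-tests"
    export TESTS_CONFIG_DIR="${TESTS_DIR}/conf"
    export TEMP_DIR="/tmp/kuttl/ps/${test_name}"
    export GIT_BRANCH=$(git rev-parse --abbrev-ref HEAD)   # PR-893
    export VERSION=PR-893-8b3e0608   # how the suffix is derived is not visible in the trace
    export IMAGE="perconalab/percona-server-mysql-operator:${VERSION}"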
logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ export VERSION=PR-893-8b3e0608 logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ VERSION=PR-893-8b3e0608 logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608 logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608 logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | ++++ which gdate logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-893/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | ++++ which date logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ date=/usr/bin/date logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ oc get projects logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ : logger.go:42: 19:00:23 | demand-backup/1-deploy-operator | +++ kubectl get nodes logger.go:42: 
19:00:23 | demand-backup/1-deploy-operator | +++ grep '^minikube' logger.go:42: 19:00:24 | demand-backup/1-deploy-operator | ++ oc get projects logger.go:42: 19:00:24 | demand-backup/1-deploy-operator | + init_temp_dir logger.go:42: 19:00:24 | demand-backup/1-deploy-operator | + rm -rf /tmp/kuttl/ps/demand-backup logger.go:42: 19:00:24 | demand-backup/1-deploy-operator | + mkdir -p /tmp/kuttl/ps/demand-backup logger.go:42: 19:00:24 | demand-backup/1-deploy-operator | + apply_s3_storage_secrets logger.go:42: 19:00:24 | demand-backup/1-deploy-operator | + kubectl -n kuttl-test-relative-reindeer apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf/minio-secret.yml logger.go:42: 19:00:25 | demand-backup/1-deploy-operator | Warning: resource secrets/minio-secret is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. logger.go:42: 19:00:25 | demand-backup/1-deploy-operator | secret/minio-secret configured logger.go:42: 19:00:25 | demand-backup/1-deploy-operator | + kubectl -n kuttl-test-relative-reindeer apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf/cloud-secret.yml logger.go:42: 19:00:26 | demand-backup/1-deploy-operator | secret/aws-s3-secret created logger.go:42: 19:00:27 | demand-backup/1-deploy-operator | secret/gcp-cs-secret created logger.go:42: 19:00:27 | demand-backup/1-deploy-operator | secret/azure-secret created logger.go:42: 19:00:27 | demand-backup/1-deploy-operator | + deploy_operator logger.go:42: 19:00:27 | demand-backup/1-deploy-operator | + destroy_operator logger.go:42: 19:00:27 | demand-backup/1-deploy-operator | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0 logger.go:42: 19:00:27 | demand-backup/1-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 19:00:27 | demand-backup/1-deploy-operator | Error from server (NotFound): deployments.apps "percona-server-mysql-operator" not found logger.go:42: 19:00:27 | demand-backup/1-deploy-operator | + true logger.go:42: 19:00:27 | demand-backup/1-deploy-operator | + [[ -n ps-operator ]] logger.go:42: 19:00:27 | demand-backup/1-deploy-operator | + kubectl delete namespace ps-operator --force --grace-period=0 logger.go:42: 19:00:27 | demand-backup/1-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
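The teardown traced above and below never fails the step when the operator or its namespace is absent: each forced delete runs under set -o errexit, so its NotFound error is swallowed with `|| true` (which xtrace prints as `+ true`). A minimal sketch of that pattern, with the kubectl invocations taken from the trace and the function shape assumed:

    # Hypothetical reconstruction of destroy_operator, as implied by the trace.
    destroy_operator() {
        kubectl -n "${OPERATOR_NS}" delete deployment percona-server-mysql-operator \
            --force --grace-period=0 || true
        if [[ -n ${OPERATOR_NS} ]]; then
            kubectl delete namespace "${OPERATOR_NS}" --force --grace-period=0 || true
        fi
    }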
logger.go:42: 19:00:28 | demand-backup/1-deploy-operator | Error from server (NotFound): namespaces "ps-operator" not found
logger.go:42: 19:00:28 | demand-backup/1-deploy-operator | + true
logger.go:42: 19:00:28 | demand-backup/1-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 19:00:28 | demand-backup/1-deploy-operator | + create_namespace ps-operator
logger.go:42: 19:00:28 | demand-backup/1-deploy-operator | + local namespace=ps-operator
logger.go:42: 19:00:28 | demand-backup/1-deploy-operator | + [[ -n '' ]]
logger.go:42: 19:00:28 | demand-backup/1-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found
logger.go:42: 19:00:28 | demand-backup/1-deploy-operator | + kubectl wait --for=delete namespace ps-operator
logger.go:42: 19:00:29 | demand-backup/1-deploy-operator | + kubectl create namespace ps-operator
logger.go:42: 19:00:29 | demand-backup/1-deploy-operator | namespace/ps-operator created
logger.go:42: 19:00:29 | demand-backup/1-deploy-operator | + kubectl -n ps-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy/crd.yaml
logger.go:42: 19:00:30 | demand-backup/1-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
logger.go:42: 19:00:30 | demand-backup/1-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
logger.go:42: 19:00:31 | demand-backup/1-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
logger.go:42: 19:00:31 | demand-backup/1-deploy-operator | + '[' -n ps-operator ']'
logger.go:42: 19:00:31 | demand-backup/1-deploy-operator | + kubectl -n ps-operator apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy/cw-rbac.yaml
logger.go:42: 19:00:33 | demand-backup/1-deploy-operator | serviceaccount/percona-server-mysql-operator created
logger.go:42: 19:00:34 | demand-backup/1-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 19:00:34 | demand-backup/1-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator created
logger.go:42: 19:00:34 | demand-backup/1-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 19:00:35 | demand-backup/1-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator created
logger.go:42: 19:00:35 | demand-backup/1-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"'
logger.go:42: 19:00:35 | demand-backup/1-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-893-8b3e0608
logger.go:42: 19:00:35 | demand-backup/1-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"'
logger.go:42: 19:00:35 | demand-backup/1-deploy-operator | + kubectl -n ps-operator apply -f -
logger.go:42: 19:00:35 | demand-backup/1-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-893-8b3e0608"' /mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy/cw-operator.yaml
logger.go:42: 19:00:36 | demand-backup/1-deploy-operator | configmap/percona-server-mysql-operator-config created
logger.go:42: 19:00:37 | demand-backup/1-deploy-operator | deployment.apps/percona-server-mysql-operator created
logger.go:42: 19:00:37 | demand-backup/1-deploy-operator | + deploy_non_tls_cluster_secrets
logger.go:42: 19:00:37 | demand-backup/1-deploy-operator | + kubectl -n kuttl-test-relative-reindeer apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf/secrets.yaml
logger.go:42: 19:00:38 | demand-backup/1-deploy-operator | secret/test-secrets created
logger.go:42: 19:00:38 | demand-backup/1-deploy-operator | + deploy_tls_cluster_secrets
logger.go:42: 19:00:38 | demand-backup/1-deploy-operator | + kubectl -n kuttl-test-relative-reindeer apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf/ssl-secret.yaml
logger.go:42: 19:00:38 | demand-backup/1-deploy-operator | secret/test-ssl created
logger.go:42: 19:00:38 | demand-backup/1-deploy-operator | + deploy_client
logger.go:42: 19:00:38 | demand-backup/1-deploy-operator | + kubectl -n kuttl-test-relative-reindeer apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf/client.yaml
logger.go:42: 19:00:39 | demand-backup/1-deploy-operator | pod/mysql-client created
logger.go:42: 19:00:39 | demand-backup/1-deploy-operator | + deploy_minio
logger.go:42: 19:00:39 | demand-backup/1-deploy-operator | + local access_key
logger.go:42: 19:00:39 | demand-backup/1-deploy-operator | + local secret_key
logger.go:42: 19:00:39 | demand-backup/1-deploy-operator | ++ kubectl -n kuttl-test-relative-reindeer get secret minio-secret -o 'jsonpath={.data.AWS_ACCESS_KEY_ID}'
logger.go:42: 19:00:39 | demand-backup/1-deploy-operator | ++ base64 -d
logger.go:42: 19:00:40 | demand-backup/1-deploy-operator | + access_key=some-access-key
logger.go:42: 19:00:40 | demand-backup/1-deploy-operator | ++ kubectl -n kuttl-test-relative-reindeer get secret minio-secret -o 'jsonpath={.data.AWS_SECRET_ACCESS_KEY}'
logger.go:42: 19:00:40 | demand-backup/1-deploy-operator | ++ base64 -d
logger.go:42: 19:00:40 | demand-backup/1-deploy-operator | + secret_key=some-secret-key
logger.go:42: 19:00:40 | demand-backup/1-deploy-operator | + helm uninstall -n kuttl-test-relative-reindeer minio-service
logger.go:42: 19:00:40 | demand-backup/1-deploy-operator | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-893/kubeconfig
logger.go:42: 19:00:40 | demand-backup/1-deploy-operator | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-893/kubeconfig
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | Error: uninstall: Release not loaded: minio-service: release: not found
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | + :
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | + helm repo remove minio
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-893/kubeconfig
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-893/kubeconfig
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | Error: no repositories configured
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | + :
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | + helm repo add minio https://charts.min.io/
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-893/kubeconfig
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-893/kubeconfig
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | "minio" has been added to your repositories
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | +++ printf %q some-access-key
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | ++ printf %q some-access-key
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | +++ printf %q some-secret-key
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | ++ printf %q some-secret-key
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | + retry 10 60 helm install minio-service -n kuttl-test-relative-reindeer --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | + local max=10
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | + local delay=60
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | + shift 2
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | + local n=1
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | + helm install minio-service -n kuttl-test-relative-reindeer --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-893/kubeconfig
logger.go:42: 19:00:41 | demand-backup/1-deploy-operator | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-893/kubeconfig
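The minio deployment above wraps helm install in `retry 10 60 ...`, and the trace exposes the helper's locals (max=10, delay=60, shift 2, n=1). A sketch consistent with those locals; the loop body is assumed, since the trace only shows the first, successful attempt:

    # Re-run a command up to $max times, sleeping $delay seconds between tries.
    retry() {
        local max=$1
        local delay=$2
        shift 2
        local n=1
        until "$@"; do
            if [[ $n -ge $max ]]; then
                echo "retry: giving up after ${n} attempts" >&2
                return 1
            fi
            sleep "$delay"
            n=$((n + 1))
        done
    }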
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator | NAME: minio-service
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator | LAST DEPLOYED: Mon May 12 19:00:42 2025
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator | NAMESPACE: kuttl-test-relative-reindeer
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator | STATUS: deployed
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator | REVISION: 1
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator | TEST SUITE: None
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator | NOTES:
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator | MinIO can be accessed via port 9000 on the following DNS name from within your cluster:
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator | minio-service.kuttl-test-relative-reindeer.svc.cluster.local
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator |
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator | To access MinIO from localhost, run the below commands:
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator |
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator | 1. export POD_NAME=$(kubectl get pods --namespace kuttl-test-relative-reindeer -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}")
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator |
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator | 2. kubectl port-forward $POD_NAME 9000 --namespace kuttl-test-relative-reindeer
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator |
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator | Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator |
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator | You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client:
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator |
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator | 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator |
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator | 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace kuttl-test-relative-reindeer minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace kuttl-test-relative-reindeer minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator |
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator | 3. mc ls minio-service-local
logger.go:42: 19:01:19 | demand-backup/1-deploy-operator | ++ kubectl -n kuttl-test-relative-reindeer get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:01:20 | demand-backup/1-deploy-operator | + MINIO_POD=minio-service-8967c7f7f-4dvnh
logger.go:42: 19:01:20 | demand-backup/1-deploy-operator | + wait_pod minio-service-8967c7f7f-4dvnh
logger.go:42: 19:01:20 | demand-backup/1-deploy-operator | + local pod=minio-service-8967c7f7f-4dvnh
logger.go:42: 19:01:20 | demand-backup/1-deploy-operator | + set +o xtrace
logger.go:42: 19:01:20 | demand-backup/1-deploy-operator | minio-service-8967c7f7f-4dvnhtrue
logger.go:42: 19:01:20 | demand-backup/1-deploy-operator | + kubectl -n kuttl-test-relative-reindeer run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID='\''some-access-key'\'' AWS_SECRET_ACCESS_KEY='\''some-secret-key'\'' AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'
logger.go:42: 19:01:25 | demand-backup/1-deploy-operator | If you don't see a command prompt, try pressing enter.
logger.go:42: 19:01:27 | demand-backup/1-deploy-operator | pod "aws-cli" deleted
[controller-runtime] log.SetLogger(...) was never called; logs will not be displayed. Detected at:
> goroutine 15 [running]:
> runtime/debug.Stack()
> /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/runtime/debug/stack.go:24 +0x5e
> sigs.k8s.io/controller-runtime/pkg/log.eventuallyFulfillRoot()
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/log.go:60 +0xcd
> sigs.k8s.io/controller-runtime/pkg/log.(*delegatingLogSink).WithName(0xc0002a9c00, {0x184a055, 0x14})
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/deleg.go:147 +0x3e
> github.com/go-logr/logr.Logger.WithName({{0x1acb7d8, 0xc0002a9c00}, 0x0}, {0x184a055?, 0xc000259f80?})
> /home/mowsiany/go/pkg/mod/github.com/go-logr/logr@v1.2.4/logr.go:336 +0x36
> sigs.k8s.io/controller-runtime/pkg/client.newClient(0x131ead3?, {0x0, 0xc00043c9a0, {0x1accd90, 0xc0002a8840}, 0x0, {0x0, 0x0}, 0x0})
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:122 +0xf1
> sigs.k8s.io/controller-runtime/pkg/client.New(0xc0000ea6c8?, {0x0, 0xc00043c9a0, {0x1accd90, 0xc0002a8840}, 0x0, {0x0, 0x0}, 0x0})
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:103 +0x7d
> github.com/kudobuilder/kuttl/pkg/test/utils.NewRetryClient(0xc0000ea6c8, {0x0, 0xc00043c9a0, {0x1accd90, 0xc0002a8840}, 0x0, {0x0, 0x0}, 0x0})
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/utils/kubernetes.go:177 +0x127
> github.com/kudobuilder/kuttl/pkg/test.(*Harness).Client(0xc00033e608, 0xe6?)
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:323 +0x18e
> github.com/kudobuilder/kuttl/pkg/test.(*Step).Create(0xc0003d36c0, 0xc00026cd00, {0xc00004af20, 0x1c})
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:177 +0x63
> github.com/kudobuilder/kuttl/pkg/test.(*Step).Run(0xc0003d36c0, 0xc00026cd00, {0xc00004af20, 0x1c})
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:457 +0x24a
> github.com/kudobuilder/kuttl/pkg/test.(*Case).Run(0xc00030caa0, 0xc00026cd00, 0xc0004fe750)
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/case.go:373 +0xaeb
> github.com/kudobuilder/kuttl/pkg/test.(*Harness).RunTests.func1.1(0xc00026cd00)
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:401 +0x12e
> testing.tRunner(0xc00026cd00, 0xc0002685a0)
> /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1689 +0xfb
> created by testing.(*T).Run in goroutine 14
> /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1742 +0x390
logger.go:42: 19:01:28 | demand-backup/1-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 19:01:28 | demand-backup/1-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 19:01:28 | demand-backup/1-deploy-operator | INFO Found 1 resource(s).
logger.go:42: 19:01:28 | demand-backup/1-deploy-operator | NAME NAMESPACE COL0
logger.go:42: 19:01:28 | demand-backup/1-deploy-operator | percona-server-mysql-operator ps-operator 1
logger.go:42: 19:01:28 | demand-backup/1-deploy-operator | ASSERT PASS
logger.go:42: 19:01:28 | demand-backup/1-deploy-operator | test step completed 1-deploy-operator
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | starting test step 2-create-cluster
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
get_cr \
| yq eval '.spec.mysql.clusterType="async"' - \
| yq eval ".spec.mysql.size=3" - \
| yq eval ".spec.proxy.haproxy.enabled=true" - \
| yq eval ".spec.proxy.haproxy.size=3" - \
| yq eval ".spec.orchestrator.enabled=true" - \
| yq eval ".spec.orchestrator.size=3" - \
| yq eval '.spec.backup.storages.minio.type="s3"' - \
| yq eval '.spec.backup.storages.minio.s3.bucket="operator-testing"' - \
| yq eval '.spec.backup.storages.minio.s3.credentialsSecret="minio-secret"' - \
| yq eval ".spec.backup.storages.minio.s3.endpointUrl=\"http://minio-service.${NAMESPACE}:9000\"" - \
| yq eval '.spec.backup.storages.minio.s3.region="us-east-1"' - \
| yq eval '.spec.backup.storages.aws-s3.type="s3"' - \
| yq eval ".spec.backup.storages.aws-s3.verifyTLS=true" - \
| yq eval '.spec.backup.storages.aws-s3.s3.bucket="operator-testing"' - \
| yq eval '.spec.backup.storages.aws-s3.s3.credentialsSecret="aws-s3-secret"' - \
| yq eval '.spec.backup.storages.aws-s3.s3.region="us-east-1"' - \
| yq eval '.spec.backup.storages.aws-s3.s3.prefix="ps"' - \
| yq eval '.spec.backup.storages.gcp-cs.type="gcs"' - \
| yq eval ".spec.backup.storages.gcp-cs.verifyTLS=true" - \
| yq eval '.spec.backup.storages.gcp-cs.gcs.bucket="operator-testing"' - \
| yq eval '.spec.backup.storages.gcp-cs.gcs.credentialsSecret="gcp-cs-secret"' - \
| yq eval '.spec.backup.storages.gcp-cs.gcs.endpointUrl="https://storage.googleapis.com"' - \
| yq eval '.spec.backup.storages.gcp-cs.gcs.prefix="ps"' - \
| yq eval '.spec.backup.storages.azure-blob.type="azure"' - \
| yq eval ".spec.backup.storages.azure-blob.verifyTLS=true" - \
| yq eval '.spec.backup.storages.azure-blob.azure.containerName="operator-testing"' - \
| yq eval '.spec.backup.storages.azure-blob.azure.credentialsSecret="azure-secret"' - \
| yq eval '.spec.backup.storages.azure-blob.azure.prefix="ps"' - \
| kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + source ../../functions
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ realpath ../../..
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | ++++ pwd
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/tests/demand-backup
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | ++ test_name=demand-backup
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/vars.sh
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ export GIT_BRANCH=PR-893
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ GIT_BRANCH=PR-893
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ export VERSION=PR-893-8b3e0608
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ VERSION=PR-893-8b3e0608
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ export
IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 19:01:28 | demand-backup/2-create-cluster | ++++ which gdate logger.go:42: 19:01:28 | demand-backup/2-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-893/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 19:01:28 | demand-backup/2-create-cluster | ++++ which date logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ date=/usr/bin/date logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ oc get projects logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ : logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ kubectl get nodes logger.go:42: 19:01:28 | demand-backup/2-create-cluster | +++ grep '^minikube' logger.go:42: 19:01:28 | demand-backup/2-create-cluster | ++ oc get projects logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + get_cr logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + local name_suffix= logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval .spec.mysql.size=3 - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.gcp-cs.type="gcs"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.gcp-cs.gcs.endpointUrl="https://storage.googleapis.com"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster 
| + yq eval '.spec.backup.storages.aws-s3.s3.prefix="ps"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.aws-s3.s3.region="us-east-1"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.aws-s3.s3.bucket="operator-testing"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.minio.s3.credentialsSecret="minio-secret"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.azure-blob.type="azure"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval .spec.orchestrator.enabled=true - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.minio.s3.endpointUrl="http://minio-service.kuttl-test-relative-reindeer:9000"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval .spec.orchestrator.size=3 - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.minio.s3.region="us-east-1"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval .spec.backup.storages.azure-blob.verifyTLS=true - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval .spec.backup.storages.aws-s3.verifyTLS=true - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.aws-s3.type="s3"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.aws-s3.s3.credentialsSecret="aws-s3-secret"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval .spec.backup.storages.gcp-cs.verifyTLS=true - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.minio.s3.bucket="operator-testing"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.minio.type="s3"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.gcp-cs.gcs.prefix="ps"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.azure-blob.azure.containerName="operator-testing"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.azure-blob.azure.credentialsSecret="azure-secret"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.azure-blob.azure.prefix="ps"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + kubectl -n kuttl-test-relative-reindeer apply -f - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.gcp-cs.gcs.credentialsSecret="gcp-cs-secret"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.gcp-cs.gcs.bucket="operator-testing"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval .spec.proxy.haproxy.size=3 - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + '[' -n '' ']' logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval .spec.proxy.haproxy.enabled=true - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval .spec.orchestrator.enabled=true - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval 
'.spec.secretsName="test-secrets"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 19:01:28 | demand-backup/2-create-cluster | ++ printf '.metadata.name="%s"' demand-backup logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.metadata.name="demand-backup"' /mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy/cr.yaml logger.go:42: 19:01:28 | demand-backup/2-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:3-dev-latest logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:3-dev-latest"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-893-8b3e0608 logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-893-8b3e0608"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' - logger.go:42: 19:01:28 | demand-backup/2-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup logger.go:42: 19:01:28 | demand-backup/2-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' - logger.go:42: 19:01:30 | demand-backup/2-create-cluster | perconaservermysql.ps.percona.com/demand-backup created logger.go:42: 19:07:05 | demand-backup/2-create-cluster | test step completed 2-create-cluster logger.go:42: 19:07:05 | demand-backup/3-write-data | starting test step 3-write-data logger.go:42: 19:07:05 | demand-backup/3-write-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_mysql \ "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" \ "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password" run_mysql \ "INSERT myDB.myTable (id) VALUES (100500)" \ "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password"] logger.go:42: 19:07:05 | demand-backup/3-write-data | + 
source ../../functions logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ realpath ../../.. logger.go:42: 19:07:05 | demand-backup/3-write-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893 logger.go:42: 19:07:05 | demand-backup/3-write-data | ++++ pwd logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/tests/demand-backup logger.go:42: 19:07:05 | demand-backup/3-write-data | ++ test_name=demand-backup logger.go:42: 19:07:05 | demand-backup/3-write-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/vars.sh logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893 logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893 logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 19:07:05 | demand-backup/3-write-data | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ export GIT_BRANCH=PR-893 logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ GIT_BRANCH=PR-893 logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ export VERSION=PR-893-8b3e0608 logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ VERSION=PR-893-8b3e0608 logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608 logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608 logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ 
IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 19:07:05 | demand-backup/3-write-data | ++++ which gdate logger.go:42: 19:07:05 | demand-backup/3-write-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-893/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 19:07:05 | demand-backup/3-write-data | ++++ which date logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ date=/usr/bin/date logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ oc get projects logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ : logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ kubectl get nodes logger.go:42: 19:07:05 | demand-backup/3-write-data | +++ grep '^minikube' logger.go:42: 19:07:06 | demand-backup/3-write-data | ++ oc get projects logger.go:42: 19:07:06 | demand-backup/3-write-data | +++ get_cluster_name logger.go:42: 19:07:06 | demand-backup/3-write-data | +++ kubectl -n kuttl-test-relative-reindeer get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 19:07:06 | demand-backup/3-write-data | ++ get_haproxy_svc demand-backup logger.go:42: 19:07:06 | demand-backup/3-write-data | ++ local cluster=demand-backup logger.go:42: 19:07:06 | demand-backup/3-write-data | ++ echo demand-backup-haproxy logger.go:42: 19:07:06 | demand-backup/3-write-data | + run_mysql 'CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' '-h demand-backup-haproxy -uroot -proot_password' logger.go:42: 19:07:06 | demand-backup/3-write-data | + local 'command=CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' logger.go:42: 19:07:06 | demand-backup/3-write-data | + local 'uri=-h demand-backup-haproxy -uroot -proot_password' logger.go:42: 19:07:06 | demand-backup/3-write-data | + local pod= logger.go:42: 19:07:06 | demand-backup/3-write-data | ++ get_client_pod logger.go:42: 19:07:06 | demand-backup/3-write-data | ++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:07:06 | demand-backup/3-write-data | + client_pod=mysql-client logger.go:42: 19:07:06 | 
demand-backup/3-write-data | + wait_pod mysql-client logger.go:42: 19:07:06 | demand-backup/3-write-data | + local pod=mysql-client logger.go:42: 19:07:06 | demand-backup/3-write-data | + set +o xtrace logger.go:42: 19:07:07 | demand-backup/3-write-data | mysql-clienttrue logger.go:42: 19:07:07 | demand-backup/3-write-data | + sed -e 's/mysql: //' logger.go:42: 19:07:07 | demand-backup/3-write-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 19:07:07 | demand-backup/3-write-data | + kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" | mysql -sN -h demand-backup-haproxy -uroot -proot_password' logger.go:42: 19:07:08 | demand-backup/3-write-data | + : logger.go:42: 19:07:08 | demand-backup/3-write-data | +++ get_cluster_name logger.go:42: 19:07:08 | demand-backup/3-write-data | +++ kubectl -n kuttl-test-relative-reindeer get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 19:07:08 | demand-backup/3-write-data | ++ get_haproxy_svc demand-backup logger.go:42: 19:07:08 | demand-backup/3-write-data | ++ local cluster=demand-backup logger.go:42: 19:07:08 | demand-backup/3-write-data | ++ echo demand-backup-haproxy logger.go:42: 19:07:08 | demand-backup/3-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100500)' '-h demand-backup-haproxy -uroot -proot_password' logger.go:42: 19:07:08 | demand-backup/3-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100500)' logger.go:42: 19:07:08 | demand-backup/3-write-data | + local 'uri=-h demand-backup-haproxy -uroot -proot_password' logger.go:42: 19:07:08 | demand-backup/3-write-data | + local pod= logger.go:42: 19:07:08 | demand-backup/3-write-data | ++ get_client_pod logger.go:42: 19:07:08 | demand-backup/3-write-data | ++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:07:08 | demand-backup/3-write-data | + client_pod=mysql-client logger.go:42: 19:07:08 | demand-backup/3-write-data | + wait_pod mysql-client logger.go:42: 19:07:08 | demand-backup/3-write-data | + local pod=mysql-client logger.go:42: 19:07:08 | demand-backup/3-write-data | + set +o xtrace logger.go:42: 19:07:09 | demand-backup/3-write-data | mysql-clienttrue logger.go:42: 19:07:09 | demand-backup/3-write-data | + kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100500)" | mysql -sN -h demand-backup-haproxy -uroot -proot_password' logger.go:42: 19:07:09 | demand-backup/3-write-data | + sed -e 's/mysql: //' logger.go:42: 19:07:09 | demand-backup/3-write-data | + grep -v 'Using a password on the command line interface can be insecure.' 
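Every read and write in this test goes through run_mysql, traced above: it resolves the long-lived mysql-client pod, pipes the statement into mysql over the given URI, and strips the password warning so assertions see clean output. A minimal sketch matching the traced commands (the function shape itself is assumed):

    run_mysql() {
        local command="$1"   # SQL to execute
        local uri="$2"       # e.g. -h demand-backup-haproxy -uroot -proot_password
        local client_pod
        client_pod=$(kubectl -n "${NAMESPACE}" get pods \
            --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}')
        kubectl -n "${NAMESPACE}" exec "${client_pod}" -- \
            bash -c "printf '%s\n' \"${command}\" | mysql -sN ${uri}" \
            | sed -e 's/mysql: //' \
            | grep -v 'Using a password on the command line interface can be insecure.'
    }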
logger.go:42: 19:07:10 | demand-backup/3-write-data | + :
logger.go:42: 19:07:10 | demand-backup/3-write-data | test step completed 3-write-data
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | starting test step 4-move-primary-before-backup
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
primary_pod_from_label="$(get_primary_from_label)"
kubectl delete pod -n ${NAMESPACE} ${primary_pod_from_label}
wait_cluster_consistency_async "${test_name}" "3" "3"
new_primary_pod_from_label="$(get_primary_from_label)"
if [ "${primary_pod_from_label}" == "${new_primary_pod_from_label}" ]; then
echo "Old (${primary_pod_from_label}) and new (${new_primary_pod_from_label}) primary are the same (the failover didn't happen)!"
exit 1
fi]
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | + source ../../functions
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ realpath ../../..
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | ++++ pwd
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/tests/demand-backup
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | ++ test_name=demand-backup
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/vars.sh
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ export GIT_BRANCH=PR-893
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ GIT_BRANCH=PR-893
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ export VERSION=PR-893-8b3e0608
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ VERSION=PR-893-8b3e0608
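Step 4 identifies the primary through the operator-managed pod label rather than by querying orchestrator; the kubectl call traced below implies a helper along these lines (the wrapper itself is assumed):

    # Resolve the current primary pod by its operator-managed label.
    get_primary_from_label() {
        kubectl -n "${NAMESPACE}" get pods \
            -l mysql.percona.com/primary=true \
            -o 'jsonpath={.items[0].metadata.name}'
    }

Deleting that pod and re-running the helper is what lets the step assert that a failover actually happened.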
logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608 logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608 logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | ++++ which gdate logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-893/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | ++++ which date logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ date=/usr/bin/date logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ oc get projects logger.go:42: 19:07:10 | 
demand-backup/4-move-primary-before-backup | +++ : logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ kubectl get nodes logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | +++ grep '^minikube' logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | ++ oc get projects logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | ++ get_primary_from_label logger.go:42: 19:07:10 | demand-backup/4-move-primary-before-backup | ++ kubectl -n kuttl-test-relative-reindeer get pods -l mysql.percona.com/primary=true '-ojsonpath={.items[0].metadata.name}' logger.go:42: 19:07:11 | demand-backup/4-move-primary-before-backup | + primary_pod_from_label=demand-backup-mysql-0 logger.go:42: 19:07:11 | demand-backup/4-move-primary-before-backup | + kubectl delete pod -n kuttl-test-relative-reindeer demand-backup-mysql-0 logger.go:42: 19:07:11 | demand-backup/4-move-primary-before-backup | pod "demand-backup-mysql-0" deleted logger.go:42: 19:07:32 | demand-backup/4-move-primary-before-backup | + wait_cluster_consistency_async demand-backup 3 3 logger.go:42: 19:07:32 | demand-backup/4-move-primary-before-backup | + local cluster_name=demand-backup logger.go:42: 19:07:32 | demand-backup/4-move-primary-before-backup | + local cluster_size=3 logger.go:42: 19:07:32 | demand-backup/4-move-primary-before-backup | + local orc_size=3 logger.go:42: 19:07:32 | demand-backup/4-move-primary-before-backup | + '[' -z 3 ']' logger.go:42: 19:07:32 | demand-backup/4-move-primary-before-backup | + sleep 7 logger.go:42: 19:07:39 | demand-backup/4-move-primary-before-backup | ++ kubectl get ps demand-backup -n kuttl-test-relative-reindeer -o 'jsonpath={.status.mysql.state}' logger.go:42: 19:07:39 | demand-backup/4-move-primary-before-backup | + [[ initializing == \r\e\a\d\y ]] logger.go:42: 19:07:39 | demand-backup/4-move-primary-before-backup | + echo 'waiting for cluster readiness (async)' logger.go:42: 19:07:39 | demand-backup/4-move-primary-before-backup | waiting for cluster readiness (async) logger.go:42: 19:07:39 | demand-backup/4-move-primary-before-backup | + sleep 15 logger.go:42: 19:07:54 | demand-backup/4-move-primary-before-backup | ++ kubectl get ps demand-backup -n kuttl-test-relative-reindeer -o 'jsonpath={.status.mysql.state}' logger.go:42: 19:07:55 | demand-backup/4-move-primary-before-backup | + [[ initializing == \r\e\a\d\y ]] logger.go:42: 19:07:55 | demand-backup/4-move-primary-before-backup | + echo 'waiting for cluster readiness (async)' logger.go:42: 19:07:55 | demand-backup/4-move-primary-before-backup | waiting for cluster readiness (async) logger.go:42: 19:07:55 | demand-backup/4-move-primary-before-backup | + sleep 15 logger.go:42: 19:08:10 | demand-backup/4-move-primary-before-backup | ++ kubectl get ps demand-backup -n kuttl-test-relative-reindeer -o 'jsonpath={.status.mysql.state}' logger.go:42: 19:08:10 | demand-backup/4-move-primary-before-backup | + [[ initializing == \r\e\a\d\y ]] logger.go:42: 19:08:10 | demand-backup/4-move-primary-before-backup | + echo 'waiting for cluster readiness (async)' logger.go:42: 19:08:10 | demand-backup/4-move-primary-before-backup | waiting for cluster readiness (async) logger.go:42: 19:08:10 | demand-backup/4-move-primary-before-backup | + sleep 15 logger.go:42: 19:08:25 | demand-backup/4-move-primary-before-backup | ++ kubectl get ps demand-backup -n kuttl-test-relative-reindeer -o 'jsonpath={.status.mysql.state}' logger.go:42: 19:08:26 | demand-backup/4-move-primary-before-backup | + [[
initializing == \r\e\a\d\y ]] logger.go:42: 19:08:26 | demand-backup/4-move-primary-before-backup | + echo 'waiting for cluster readiness (async)' logger.go:42: 19:08:26 | demand-backup/4-move-primary-before-backup | waiting for cluster readiness (async) logger.go:42: 19:08:26 | demand-backup/4-move-primary-before-backup | + sleep 15 logger.go:42: 19:08:41 | demand-backup/4-move-primary-before-backup | ++ kubectl get ps demand-backup -n kuttl-test-relative-reindeer -o 'jsonpath={.status.mysql.state}' logger.go:42: 19:08:41 | demand-backup/4-move-primary-before-backup | + [[ initializing == \r\e\a\d\y ]] logger.go:42: 19:08:41 | demand-backup/4-move-primary-before-backup | + echo 'waiting for cluster readiness (async)' logger.go:42: 19:08:41 | demand-backup/4-move-primary-before-backup | waiting for cluster readiness (async) logger.go:42: 19:08:41 | demand-backup/4-move-primary-before-backup | + sleep 15 logger.go:42: 19:08:56 | demand-backup/4-move-primary-before-backup | ++ kubectl get ps demand-backup -n kuttl-test-relative-reindeer -o 'jsonpath={.status.mysql.state}' logger.go:42: 19:08:56 | demand-backup/4-move-primary-before-backup | + [[ initializing == \r\e\a\d\y ]] logger.go:42: 19:08:56 | demand-backup/4-move-primary-before-backup | + echo 'waiting for cluster readiness (async)' logger.go:42: 19:08:56 | demand-backup/4-move-primary-before-backup | waiting for cluster readiness (async) logger.go:42: 19:08:56 | demand-backup/4-move-primary-before-backup | + sleep 15 logger.go:42: 19:09:11 | demand-backup/4-move-primary-before-backup | ++ kubectl get ps demand-backup -n kuttl-test-relative-reindeer -o 'jsonpath={.status.mysql.state}' logger.go:42: 19:09:12 | demand-backup/4-move-primary-before-backup | + [[ initializing == \r\e\a\d\y ]] logger.go:42: 19:09:12 | demand-backup/4-move-primary-before-backup | + echo 'waiting for cluster readiness (async)' logger.go:42: 19:09:12 | demand-backup/4-move-primary-before-backup | waiting for cluster readiness (async) logger.go:42: 19:09:12 | demand-backup/4-move-primary-before-backup | + sleep 15 logger.go:42: 19:09:27 | demand-backup/4-move-primary-before-backup | ++ kubectl get ps demand-backup -n kuttl-test-relative-reindeer -o 'jsonpath={.status.mysql.state}' logger.go:42: 19:09:27 | demand-backup/4-move-primary-before-backup | + [[ ready == \r\e\a\d\y ]] logger.go:42: 19:09:27 | demand-backup/4-move-primary-before-backup | ++ kubectl get ps demand-backup -n kuttl-test-relative-reindeer -o 'jsonpath={.status.mysql.ready}' logger.go:42: 19:09:28 | demand-backup/4-move-primary-before-backup | + [[ 3 == \3 ]] logger.go:42: 19:09:28 | demand-backup/4-move-primary-before-backup | ++ kubectl get ps demand-backup -n kuttl-test-relative-reindeer -o 'jsonpath={.status.orchestrator.ready}' logger.go:42: 19:09:28 | demand-backup/4-move-primary-before-backup | + [[ 3 == \3 ]] logger.go:42: 19:09:28 | demand-backup/4-move-primary-before-backup | ++ kubectl get ps demand-backup -n kuttl-test-relative-reindeer -o 'jsonpath={.status.orchestrator.state}' logger.go:42: 19:09:29 | demand-backup/4-move-primary-before-backup | + [[ ready == \r\e\a\d\y ]] logger.go:42: 19:09:29 | demand-backup/4-move-primary-before-backup | ++ kubectl get ps demand-backup -n kuttl-test-relative-reindeer -o 'jsonpath={.status.state}' logger.go:42: 19:09:29 | demand-backup/4-move-primary-before-backup | + [[ ready == \r\e\a\d\y ]] logger.go:42: 19:09:29 | demand-backup/4-move-primary-before-backup | ++ get_primary_from_label logger.go:42: 19:09:29 |
demand-backup/4-move-primary-before-backup | ++ kubectl -n kuttl-test-relative-reindeer get pods -l mysql.percona.com/primary=true '-ojsonpath={.items[0].metadata.name}' logger.go:42: 19:09:30 | demand-backup/4-move-primary-before-backup | + new_primary_pod_from_label=demand-backup-mysql-1 logger.go:42: 19:09:30 | demand-backup/4-move-primary-before-backup | + '[' demand-backup-mysql-0 == demand-backup-mysql-1 ']' logger.go:42: 19:09:30 | demand-backup/4-move-primary-before-backup | test step completed 4-move-primary-before-backup logger.go:42: 19:09:30 | demand-backup/5-create-backup-minio | starting test step 5-create-backup-minio logger.go:42: 19:09:30 | demand-backup/5-create-backup-minio | PerconaServerMySQLBackup:kuttl-test-relative-reindeer/demand-backup-minio created logger.go:42: 19:09:42 | demand-backup/5-create-backup-minio | test step completed 5-create-backup-minio logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | starting test step 6-check-password-leak logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | running command: [sh -c set -o errexit set -o xtrace source ../../functions check_passwords_leak] logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | + source ../../functions logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ realpath ../../.. logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893 logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | ++++ pwd logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/tests/demand-backup logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | ++ test_name=demand-backup logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/vars.sh logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893 logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893 logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ export GIT_BRANCH=PR-893 logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ GIT_BRANCH=PR-893 logger.go:42: 19:09:42 | 
demand-backup/6-check-password-leak | +++ export VERSION=PR-893-8b3e0608 logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ VERSION=PR-893-8b3e0608 logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608 logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608 logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | ++++ which gdate logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-893/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | ++++ which date logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ date=/usr/bin/date logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ oc get projects logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ : 
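The `which gdate` / `which date` pair that closes every step's setup is a portability shim: GNU date is installed as gdate on macOS/Homebrew and as plain date on Linux, and the traced vars.sh resolves to /usr/bin/date here because gdate is absent. A minimal sketch of what that resolution amounts to (the if/else structure is an assumption; only the variable name `date` and the two probed binaries appear in the trace):

# Prefer GNU date installed as gdate, else fall back to the system date.
if command -v gdate >/dev/null 2>&1; then
    date=$(command -v gdate)
else
    date=$(command -v date)
fi
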
logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ kubectl get nodes logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | +++ grep '^minikube' logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | ++ oc get projects logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | + check_passwords_leak logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | + local secrets logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | + local passwords logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | + local pods logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | ++ kubectl get secrets -o json logger.go:42: 19:09:42 | demand-backup/6-check-password-leak | ++ jq -r '.items[].data | to_entries | .[] | select(.key | (endswith(".crt") or endswith(".key") or endswith(".pub") or endswith(".pem") or endswith(".p12") or test("namespace")) | not) | .value' logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | + secrets= logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | + passwords=' ' logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pods -o name logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | ++ awk -F / '{print $2}' logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | + pods='demand-backup-haproxy-0 logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | demand-backup-haproxy-1 logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | demand-backup-haproxy-2 logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | demand-backup-mysql-0 logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | demand-backup-mysql-1 logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | demand-backup-mysql-2 logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | demand-backup-orc-0 logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | demand-backup-orc-1 logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | demand-backup-orc-2 logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | minio-service-8967c7f7f-4dvnh logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | mysql-client logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | xb-demand-backup-minio-minio-7r48g' logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | + collect_logs kuttl-test-relative-reindeer logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | + local containers logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | + local count logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | + NS=kuttl-test-relative-reindeer logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | + for p in '$pods' logger.go:42: 19:09:43 | demand-backup/6-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod demand-backup-haproxy-0 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 19:09:44 | demand-backup/6-check-password-leak | + containers='haproxy mysql-monit' logger.go:42: 19:09:44 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:09:44 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-haproxy-0 -c haproxy logger.go:42: 19:09:45 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-0-haproxy.txt logger.go:42: 19:09:45 | demand-backup/6-check-password-leak | logs saved in: 
/tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-0-haproxy.txt logger.go:42: 19:09:45 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:09:45 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-haproxy-0 -c mysql-monit logger.go:42: 19:09:45 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-0-mysql-monit.txt logger.go:42: 19:09:45 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-0-mysql-monit.txt logger.go:42: 19:09:45 | demand-backup/6-check-password-leak | + echo logger.go:42: 19:09:45 | demand-backup/6-check-password-leak | logger.go:42: 19:09:45 | demand-backup/6-check-password-leak | + for p in '$pods' logger.go:42: 19:09:45 | demand-backup/6-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod demand-backup-haproxy-1 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 19:09:46 | demand-backup/6-check-password-leak | + containers='haproxy mysql-monit' logger.go:42: 19:09:46 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:09:46 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-haproxy-1 -c haproxy logger.go:42: 19:09:47 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-1-haproxy.txt logger.go:42: 19:09:47 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-1-haproxy.txt logger.go:42: 19:09:47 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:09:47 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-haproxy-1 -c mysql-monit logger.go:42: 19:09:47 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-1-mysql-monit.txt logger.go:42: 19:09:47 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-1-mysql-monit.txt logger.go:42: 19:09:47 | demand-backup/6-check-password-leak | + echo logger.go:42: 19:09:47 | demand-backup/6-check-password-leak | logger.go:42: 19:09:47 | demand-backup/6-check-password-leak | + for p in '$pods' logger.go:42: 19:09:47 | demand-backup/6-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod demand-backup-haproxy-2 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 19:09:48 | demand-backup/6-check-password-leak | + containers='haproxy mysql-monit' logger.go:42: 19:09:48 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:09:48 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-haproxy-2 -c haproxy logger.go:42: 19:09:49 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-2-haproxy.txt logger.go:42: 19:09:49 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-2-haproxy.txt logger.go:42: 19:09:49 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:09:49 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-haproxy-2 -c mysql-monit logger.go:42: 19:09:50 | 
demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-2-mysql-monit.txt logger.go:42: 19:09:50 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-2-mysql-monit.txt logger.go:42: 19:09:50 | demand-backup/6-check-password-leak | + echo logger.go:42: 19:09:50 | demand-backup/6-check-password-leak | logger.go:42: 19:09:50 | demand-backup/6-check-password-leak | + for p in '$pods' logger.go:42: 19:09:50 | demand-backup/6-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod demand-backup-mysql-0 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 19:09:50 | demand-backup/6-check-password-leak | + containers='mysql xtrabackup pt-heartbeat' logger.go:42: 19:09:50 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:09:50 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-mysql-0 -c mysql logger.go:42: 19:09:51 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-0-mysql.txt logger.go:42: 19:09:51 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-0-mysql.txt logger.go:42: 19:09:51 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:09:51 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-mysql-0 -c xtrabackup logger.go:42: 19:09:51 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-0-xtrabackup.txt logger.go:42: 19:09:51 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-0-xtrabackup.txt logger.go:42: 19:09:51 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:09:51 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-mysql-0 -c pt-heartbeat logger.go:42: 19:09:52 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-0-pt-heartbeat.txt logger.go:42: 19:09:52 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-0-pt-heartbeat.txt logger.go:42: 19:09:52 | demand-backup/6-check-password-leak | + echo logger.go:42: 19:09:52 | demand-backup/6-check-password-leak | logger.go:42: 19:09:52 | demand-backup/6-check-password-leak | + for p in '$pods' logger.go:42: 19:09:52 | demand-backup/6-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod demand-backup-mysql-1 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 19:09:52 | demand-backup/6-check-password-leak | + containers='mysql xtrabackup pt-heartbeat' logger.go:42: 19:09:52 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:09:52 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-mysql-1 -c mysql logger.go:42: 19:09:53 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-1-mysql.txt logger.go:42: 19:09:53 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-1-mysql.txt logger.go:42: 19:09:53 | demand-backup/6-check-password-leak | + for c in 
'$containers' logger.go:42: 19:09:53 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-mysql-1 -c xtrabackup logger.go:42: 19:09:54 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-1-xtrabackup.txt logger.go:42: 19:09:54 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-1-xtrabackup.txt logger.go:42: 19:09:54 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:09:54 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-mysql-1 -c pt-heartbeat logger.go:42: 19:09:54 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-1-pt-heartbeat.txt logger.go:42: 19:09:54 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-1-pt-heartbeat.txt logger.go:42: 19:09:54 | demand-backup/6-check-password-leak | + echo logger.go:42: 19:09:54 | demand-backup/6-check-password-leak | logger.go:42: 19:09:54 | demand-backup/6-check-password-leak | + for p in '$pods' logger.go:42: 19:09:54 | demand-backup/6-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod demand-backup-mysql-2 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 19:09:55 | demand-backup/6-check-password-leak | + containers='mysql xtrabackup pt-heartbeat' logger.go:42: 19:09:55 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:09:55 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-mysql-2 -c mysql logger.go:42: 19:09:55 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-2-mysql.txt logger.go:42: 19:09:55 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-2-mysql.txt logger.go:42: 19:09:55 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:09:55 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-mysql-2 -c xtrabackup logger.go:42: 19:09:56 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-2-xtrabackup.txt logger.go:42: 19:09:56 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-2-xtrabackup.txt logger.go:42: 19:09:56 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:09:56 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-mysql-2 -c pt-heartbeat logger.go:42: 19:09:56 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-2-pt-heartbeat.txt logger.go:42: 19:09:56 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-2-pt-heartbeat.txt logger.go:42: 19:09:56 | demand-backup/6-check-password-leak | + echo logger.go:42: 19:09:56 | demand-backup/6-check-password-leak | logger.go:42: 19:09:56 | demand-backup/6-check-password-leak | + for p in '$pods' logger.go:42: 19:09:56 | demand-backup/6-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod demand-backup-orc-0 -o 
'jsonpath={.spec.containers[*].name}' logger.go:42: 19:09:57 | demand-backup/6-check-password-leak | + containers='orc mysql-monit' logger.go:42: 19:09:57 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:09:57 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-orc-0 -c orc logger.go:42: 19:09:58 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-0-orc.txt logger.go:42: 19:09:58 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-0-orc.txt logger.go:42: 19:09:58 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:09:58 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-orc-0 -c mysql-monit logger.go:42: 19:09:58 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-0-mysql-monit.txt logger.go:42: 19:09:58 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-0-mysql-monit.txt logger.go:42: 19:09:58 | demand-backup/6-check-password-leak | + echo logger.go:42: 19:09:58 | demand-backup/6-check-password-leak | logger.go:42: 19:09:58 | demand-backup/6-check-password-leak | + for p in '$pods' logger.go:42: 19:09:58 | demand-backup/6-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod demand-backup-orc-1 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 19:09:59 | demand-backup/6-check-password-leak | + containers='orc mysql-monit' logger.go:42: 19:09:59 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:09:59 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-orc-1 -c orc logger.go:42: 19:10:00 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-1-orc.txt logger.go:42: 19:10:00 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-1-orc.txt logger.go:42: 19:10:00 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:10:00 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-orc-1 -c mysql-monit logger.go:42: 19:10:00 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-1-mysql-monit.txt logger.go:42: 19:10:00 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-1-mysql-monit.txt logger.go:42: 19:10:00 | demand-backup/6-check-password-leak | + echo logger.go:42: 19:10:00 | demand-backup/6-check-password-leak | logger.go:42: 19:10:00 | demand-backup/6-check-password-leak | + for p in '$pods' logger.go:42: 19:10:00 | demand-backup/6-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod demand-backup-orc-2 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 19:10:01 | demand-backup/6-check-password-leak | + containers='orc mysql-monit' logger.go:42: 19:10:01 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:10:01 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-orc-2 -c orc logger.go:42: 19:10:02 | demand-backup/6-check-password-leak | + echo logs saved in: 
/tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-2-orc.txt logger.go:42: 19:10:02 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-2-orc.txt logger.go:42: 19:10:02 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:10:02 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-orc-2 -c mysql-monit logger.go:42: 19:10:02 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-2-mysql-monit.txt logger.go:42: 19:10:02 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-2-mysql-monit.txt logger.go:42: 19:10:02 | demand-backup/6-check-password-leak | + echo logger.go:42: 19:10:02 | demand-backup/6-check-password-leak | logger.go:42: 19:10:02 | demand-backup/6-check-password-leak | + for p in '$pods' logger.go:42: 19:10:02 | demand-backup/6-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod minio-service-8967c7f7f-4dvnh -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 19:10:02 | demand-backup/6-check-password-leak | + containers=minio logger.go:42: 19:10:02 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:10:02 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs minio-service-8967c7f7f-4dvnh -c minio logger.go:42: 19:10:03 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-minio-service-8967c7f7f-4dvnh-minio.txt logger.go:42: 19:10:03 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-minio-service-8967c7f7f-4dvnh-minio.txt logger.go:42: 19:10:03 | demand-backup/6-check-password-leak | + echo logger.go:42: 19:10:03 | demand-backup/6-check-password-leak | logger.go:42: 19:10:03 | demand-backup/6-check-password-leak | + for p in '$pods' logger.go:42: 19:10:03 | demand-backup/6-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod mysql-client -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 19:10:04 | demand-backup/6-check-password-leak | + containers=mysql-client logger.go:42: 19:10:04 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:10:04 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs mysql-client -c mysql-client logger.go:42: 19:10:04 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-mysql-client-mysql-client.txt logger.go:42: 19:10:04 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-mysql-client-mysql-client.txt logger.go:42: 19:10:04 | demand-backup/6-check-password-leak | + echo logger.go:42: 19:10:04 | demand-backup/6-check-password-leak | logger.go:42: 19:10:04 | demand-backup/6-check-password-leak | + for p in '$pods' logger.go:42: 19:10:04 | demand-backup/6-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod xb-demand-backup-minio-minio-7r48g -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 19:10:05 | demand-backup/6-check-password-leak | + containers=xtrabackup logger.go:42: 19:10:05 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:10:05 | demand-backup/6-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs xb-demand-backup-minio-minio-7r48g -c xtrabackup 
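Pulling the repeated commands together, collect_logs reduces to the loop below, reconstructed from the xtrace. The leak grep itself never shows up in this run because the decoded secret list came back empty (`secrets=` above), so only the traced collection stage is sketched:

# Sketch of collect_logs: dump every container log of every pod in a
# namespace to TEMP_DIR so the files can then be grepped for leaked passwords.
collect_logs() {
    local NS=$1
    local p c containers
    for p in $(kubectl -n "$NS" get pods -o name | awk -F/ '{print $2}'); do
        containers=$(kubectl -n "$NS" get pod "$p" \
            -o 'jsonpath={.spec.containers[*].name}')
        for c in $containers; do
            kubectl -n "$NS" logs "$p" -c "$c" \
                >"${TEMP_DIR}/logs_output-${p}-${c}.txt"
            echo "logs saved in: ${TEMP_DIR}/logs_output-${p}-${c}.txt"
        done
    done
}
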
logger.go:42: 19:10:05 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-xb-demand-backup-minio-minio-7r48g-xtrabackup.txt logger.go:42: 19:10:05 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-xb-demand-backup-minio-minio-7r48g-xtrabackup.txt logger.go:42: 19:10:05 | demand-backup/6-check-password-leak | + echo logger.go:42: 19:10:05 | demand-backup/6-check-password-leak | logger.go:42: 19:10:05 | demand-backup/6-check-password-leak | + '[' -n ps-operator ']' logger.go:42: 19:10:05 | demand-backup/6-check-password-leak | ++ kubectl -n ps-operator get pods -o name logger.go:42: 19:10:05 | demand-backup/6-check-password-leak | ++ awk -F / '{print $2}' logger.go:42: 19:10:06 | demand-backup/6-check-password-leak | + pods=percona-server-mysql-operator-bf7db58fb-gkgbj logger.go:42: 19:10:06 | demand-backup/6-check-password-leak | + collect_logs ps-operator logger.go:42: 19:10:06 | demand-backup/6-check-password-leak | + local containers logger.go:42: 19:10:06 | demand-backup/6-check-password-leak | + local count logger.go:42: 19:10:06 | demand-backup/6-check-password-leak | + NS=ps-operator logger.go:42: 19:10:06 | demand-backup/6-check-password-leak | + for p in '$pods' logger.go:42: 19:10:06 | demand-backup/6-check-password-leak | ++ kubectl -n ps-operator get pod percona-server-mysql-operator-bf7db58fb-gkgbj -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 19:10:06 | demand-backup/6-check-password-leak | + containers=manager logger.go:42: 19:10:06 | demand-backup/6-check-password-leak | + for c in '$containers' logger.go:42: 19:10:06 | demand-backup/6-check-password-leak | + kubectl -n ps-operator logs percona-server-mysql-operator-bf7db58fb-gkgbj -c manager logger.go:42: 19:10:07 | demand-backup/6-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-percona-server-mysql-operator-bf7db58fb-gkgbj-manager.txt logger.go:42: 19:10:07 | demand-backup/6-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-percona-server-mysql-operator-bf7db58fb-gkgbj-manager.txt logger.go:42: 19:10:07 | demand-backup/6-check-password-leak | + echo logger.go:42: 19:10:07 | demand-backup/6-check-password-leak | logger.go:42: 19:10:07 | demand-backup/6-check-password-leak | test step completed 6-check-password-leak logger.go:42: 19:10:07 | demand-backup/7-delete-data | starting test step 7-delete-data logger.go:42: 19:10:07 | demand-backup/7-delete-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_mysql \ "TRUNCATE TABLE myDB.myTable" \ "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password" cluster_name=$(get_cluster_name) for i in 0 1 2; do data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql -uroot -proot_password") kubectl create configmap -n "${NAMESPACE}" 04-delete-data-minio-${i} --from-literal=data="${data}" done] logger.go:42: 19:10:07 | demand-backup/7-delete-data | + source ../../functions logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ realpath ../../.. 
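Step 7's script (listed above) wipes the table through the HAProxy service and then snapshots each replica's view into a ConfigMap, so a later kuttl assert can compare against it. The same listing, annotated (all helpers come from e2e-tests/functions):

# TRUNCATE via HAProxy, then read each replica directly and persist whatever
# comes back for the kuttl assert files.
run_mysql "TRUNCATE TABLE myDB.myTable" \
    "-h $(get_haproxy_svc "$(get_cluster_name)") -uroot -proot_password"

cluster_name=$(get_cluster_name)
for i in 0 1 2; do
    data=$(run_mysql "SELECT * FROM myDB.myTable" \
        "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql -uroot -proot_password")
    kubectl create configmap -n "${NAMESPACE}" "04-delete-data-minio-${i}" \
        --from-literal=data="${data}"
done

An empty data value is the expected outcome here, since the SELECT runs immediately after the TRUNCATE.
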
logger.go:42: 19:10:07 | demand-backup/7-delete-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893 logger.go:42: 19:10:07 | demand-backup/7-delete-data | ++++ pwd logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/tests/demand-backup logger.go:42: 19:10:07 | demand-backup/7-delete-data | ++ test_name=demand-backup logger.go:42: 19:10:07 | demand-backup/7-delete-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/vars.sh logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893 logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893 logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 19:10:07 | demand-backup/7-delete-data | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ export GIT_BRANCH=PR-893 logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ GIT_BRANCH=PR-893 logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ export VERSION=PR-893-8b3e0608 logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ VERSION=PR-893-8b3e0608 logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608 logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608 logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 19:10:07 | 
demand-backup/7-delete-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 19:10:07 | demand-backup/7-delete-data | ++++ which gdate logger.go:42: 19:10:07 | demand-backup/7-delete-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-893/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 19:10:07 | demand-backup/7-delete-data | ++++ which date logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ date=/usr/bin/date logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ oc get projects logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ : logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ kubectl get nodes logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ grep '^minikube' logger.go:42: 19:10:07 | demand-backup/7-delete-data | ++ oc get projects logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ get_cluster_name logger.go:42: 19:10:07 | demand-backup/7-delete-data | +++ kubectl -n kuttl-test-relative-reindeer get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 19:10:08 | demand-backup/7-delete-data | ++ get_haproxy_svc demand-backup logger.go:42: 19:10:08 | demand-backup/7-delete-data | ++ local cluster=demand-backup logger.go:42: 19:10:08 | demand-backup/7-delete-data | ++ echo demand-backup-haproxy logger.go:42: 19:10:08 | demand-backup/7-delete-data | + run_mysql 'TRUNCATE TABLE myDB.myTable' '-h demand-backup-haproxy -uroot -proot_password' logger.go:42: 19:10:08 | demand-backup/7-delete-data | + local 'command=TRUNCATE TABLE myDB.myTable' logger.go:42: 19:10:08 | demand-backup/7-delete-data | + local 'uri=-h demand-backup-haproxy -uroot -proot_password' logger.go:42: 19:10:08 | demand-backup/7-delete-data | + local pod= logger.go:42: 19:10:08 | demand-backup/7-delete-data | ++ get_client_pod logger.go:42: 19:10:08 | demand-backup/7-delete-data | ++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:10:08 | demand-backup/7-delete-data | + client_pod=mysql-client logger.go:42: 19:10:08 | demand-backup/7-delete-data | + wait_pod mysql-client logger.go:42: 19:10:08 | demand-backup/7-delete-data | + local pod=mysql-client logger.go:42: 19:10:08 | demand-backup/7-delete-data | + set +o xtrace logger.go:42: 
19:10:09 | demand-backup/7-delete-data | mysql-clienttrue logger.go:42: 19:10:09 | demand-backup/7-delete-data | + sed -e 's/mysql: //' logger.go:42: 19:10:09 | demand-backup/7-delete-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 19:10:09 | demand-backup/7-delete-data | + kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "TRUNCATE TABLE myDB.myTable" | mysql -sN -h demand-backup-haproxy -uroot -proot_password' logger.go:42: 19:10:10 | demand-backup/7-delete-data | + : logger.go:42: 19:10:10 | demand-backup/7-delete-data | ++ get_cluster_name logger.go:42: 19:10:10 | demand-backup/7-delete-data | ++ kubectl -n kuttl-test-relative-reindeer get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 19:10:10 | demand-backup/7-delete-data | + cluster_name=demand-backup logger.go:42: 19:10:10 | demand-backup/7-delete-data | + for i in 0 1 2 logger.go:42: 19:10:10 | demand-backup/7-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-0.demand-backup-mysql -uroot -proot_password' logger.go:42: 19:10:10 | demand-backup/7-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 19:10:10 | demand-backup/7-delete-data | ++ local 'uri=-h demand-backup-mysql-0.demand-backup-mysql -uroot -proot_password' logger.go:42: 19:10:10 | demand-backup/7-delete-data | ++ local pod= logger.go:42: 19:10:10 | demand-backup/7-delete-data | +++ get_client_pod logger.go:42: 19:10:10 | demand-backup/7-delete-data | +++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:10:10 | demand-backup/7-delete-data | ++ client_pod=mysql-client logger.go:42: 19:10:10 | demand-backup/7-delete-data | ++ wait_pod mysql-client logger.go:42: 19:10:10 | demand-backup/7-delete-data | ++ local pod=mysql-client logger.go:42: 19:10:10 | demand-backup/7-delete-data | ++ set +o xtrace logger.go:42: 19:10:11 | demand-backup/7-delete-data | mysql-clienttrue logger.go:42: 19:10:11 | demand-backup/7-delete-data | ++ kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-0.demand-backup-mysql -uroot -proot_password' logger.go:42: 19:10:11 | demand-backup/7-delete-data | ++ sed -e 's/mysql: //' logger.go:42: 19:10:11 | demand-backup/7-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
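From the exec/sed/grep sequence above, run_mysql reconstructs to roughly the following (a sketch; the real helper lives in e2e-tests/functions):

# Sketch of run_mysql: pipe the statement into the mysql CLI inside the
# long-lived mysql-client pod and strip the password warning from the output.
run_mysql() {
    local command=$1
    local uri=$2
    local client_pod
    client_pod=$(kubectl -n "${NAMESPACE}" get pods \
        --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}')
    kubectl -n "${NAMESPACE}" exec "${client_pod}" -- \
        bash -c "printf '%s\n' \"${command}\" | mysql -sN ${uri}" |
        sed -e 's/mysql: //' |
        grep -v 'Using a password on the command line interface can be insecure.'
}

The bare `++ :` entries that follow each empty SELECT suggest the caller guards the pipeline with `|| :`, since grep exits non-zero when nothing survives the filter and the step runs under `set -o errexit`; that reading is an inference from the trace, not something the log states.
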
logger.go:42: 19:10:12 | demand-backup/7-delete-data | ++ : logger.go:42: 19:10:12 | demand-backup/7-delete-data | + data= logger.go:42: 19:10:12 | demand-backup/7-delete-data | + kubectl create configmap -n kuttl-test-relative-reindeer 04-delete-data-minio-0 --from-literal=data= logger.go:42: 19:10:12 | demand-backup/7-delete-data | configmap/04-delete-data-minio-0 created logger.go:42: 19:10:12 | demand-backup/7-delete-data | + for i in 0 1 2 logger.go:42: 19:10:12 | demand-backup/7-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-1.demand-backup-mysql -uroot -proot_password' logger.go:42: 19:10:12 | demand-backup/7-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 19:10:12 | demand-backup/7-delete-data | ++ local 'uri=-h demand-backup-mysql-1.demand-backup-mysql -uroot -proot_password' logger.go:42: 19:10:12 | demand-backup/7-delete-data | ++ local pod= logger.go:42: 19:10:12 | demand-backup/7-delete-data | +++ get_client_pod logger.go:42: 19:10:12 | demand-backup/7-delete-data | +++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:10:12 | demand-backup/7-delete-data | ++ client_pod=mysql-client logger.go:42: 19:10:12 | demand-backup/7-delete-data | ++ wait_pod mysql-client logger.go:42: 19:10:12 | demand-backup/7-delete-data | ++ local pod=mysql-client logger.go:42: 19:10:12 | demand-backup/7-delete-data | ++ set +o xtrace logger.go:42: 19:10:13 | demand-backup/7-delete-data | mysql-clienttrue logger.go:42: 19:10:13 | demand-backup/7-delete-data | ++ kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-1.demand-backup-mysql -uroot -proot_password' logger.go:42: 19:10:13 | demand-backup/7-delete-data | ++ sed -e 's/mysql: //' logger.go:42: 19:10:13 | demand-backup/7-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 19:10:14 | demand-backup/7-delete-data | ++ : logger.go:42: 19:10:14 | demand-backup/7-delete-data | + data= logger.go:42: 19:10:14 | demand-backup/7-delete-data | + kubectl create configmap -n kuttl-test-relative-reindeer 04-delete-data-minio-1 --from-literal=data= logger.go:42: 19:10:14 | demand-backup/7-delete-data | configmap/04-delete-data-minio-1 created logger.go:42: 19:10:14 | demand-backup/7-delete-data | + for i in 0 1 2 logger.go:42: 19:10:14 | demand-backup/7-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-2.demand-backup-mysql -uroot -proot_password' logger.go:42: 19:10:14 | demand-backup/7-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 19:10:14 | demand-backup/7-delete-data | ++ local 'uri=-h demand-backup-mysql-2.demand-backup-mysql -uroot -proot_password' logger.go:42: 19:10:14 | demand-backup/7-delete-data | ++ local pod= logger.go:42: 19:10:14 | demand-backup/7-delete-data | +++ get_client_pod logger.go:42: 19:10:14 | demand-backup/7-delete-data | +++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:10:14 | demand-backup/7-delete-data | ++ client_pod=mysql-client logger.go:42: 19:10:14 | demand-backup/7-delete-data | ++ wait_pod mysql-client logger.go:42: 19:10:14 | demand-backup/7-delete-data | ++ local pod=mysql-client logger.go:42: 19:10:14 | demand-backup/7-delete-data | ++ set +o xtrace logger.go:42: 19:10:15 | demand-backup/7-delete-data | mysql-clienttrue logger.go:42: 19:10:15 | demand-backup/7-delete-data | ++ kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-2.demand-backup-mysql -uroot -proot_password' logger.go:42: 19:10:15 | demand-backup/7-delete-data | ++ sed -e 's/mysql: //' logger.go:42: 19:10:15 | demand-backup/7-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 19:10:16 | demand-backup/7-delete-data | ++ : logger.go:42: 19:10:16 | demand-backup/7-delete-data | + data= logger.go:42: 19:10:16 | demand-backup/7-delete-data | + kubectl create configmap -n kuttl-test-relative-reindeer 04-delete-data-minio-2 --from-literal=data= logger.go:42: 19:10:16 | demand-backup/7-delete-data | configmap/04-delete-data-minio-2 created logger.go:42: 19:10:17 | demand-backup/7-delete-data | test step completed 7-delete-data logger.go:42: 19:10:17 | demand-backup/8-restore-from-minio | starting test step 8-restore-from-minio logger.go:42: 19:10:17 | demand-backup/8-restore-from-minio | PerconaServerMySQLRestore:kuttl-test-relative-reindeer/demand-backup-restore-minio created logger.go:42: 19:15:25 | demand-backup/8-restore-from-minio | test step completed 8-restore-from-minio logger.go:42: 19:15:25 | demand-backup/9-check-password-leak | starting test step 9-check-password-leak logger.go:42: 19:15:25 | demand-backup/9-check-password-leak | running command: [sh -c set -o errexit set -o xtrace source ../../functions check_passwords_leak] logger.go:42: 19:15:25 | demand-backup/9-check-password-leak | + source ../../functions logger.go:42: 19:15:25 | demand-backup/9-check-password-leak | +++ realpath ../../.. 
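Step 8 is driven entirely by a custom resource: kuttl creates the PerconaServerMySQLRestore named above and simply waits (about five minutes here) for the operator to carry out the restore. A sketch of what such a manifest typically looks like; the apiVersion and the spec field names are assumptions based on the operator's published examples, not taken from this log:

kubectl -n kuttl-test-relative-reindeer apply -f - <<'EOF'
apiVersion: ps.percona.com/v1alpha1   # assumed API group/version
kind: PerconaServerMySQLRestore
metadata:
  name: demand-backup-restore-minio
spec:
  clusterName: demand-backup          # assumed field name
  backupName: demand-backup-minio    # assumed field name
EOF
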
logger.go:42: 19:15:25 | demand-backup/9-check-password-leak | +++ (vars.sh environment setup trace identical to step 1-deploy-operator omitted)
logger.go:42: 19:15:25 | demand-backup/9-check-password-leak | + check_passwords_leak
logger.go:42: 19:15:25 | demand-backup/9-check-password-leak | + local secrets
logger.go:42: 19:15:25 | demand-backup/9-check-password-leak | + local passwords
logger.go:42: 19:15:25 | demand-backup/9-check-password-leak | + local pods
logger.go:42: 19:15:25 | demand-backup/9-check-password-leak | ++ kubectl get secrets -o json
logger.go:42: 19:15:25 | demand-backup/9-check-password-leak | ++ jq -r '.items[].data | to_entries | .[] | select(.key | (endswith(".crt") or endswith(".key") or endswith(".pub") or endswith(".pem") or endswith(".p12") or test("namespace")) | not) | .value'
logger.go:42: 19:15:25 | demand-backup/9-check-password-leak | + secrets=
logger.go:42: 19:15:25 | demand-backup/9-check-password-leak | + passwords=' '
logger.go:42: 19:15:25 | demand-backup/9-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pods -o name
logger.go:42: 19:15:25 | demand-backup/9-check-password-leak | ++ awk -F / '{print $2}'
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | + pods='demand-backup-haproxy-0
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | demand-backup-haproxy-1
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | demand-backup-haproxy-2
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | demand-backup-mysql-0
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | demand-backup-mysql-1
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | demand-backup-mysql-2
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | demand-backup-orc-0
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | demand-backup-orc-1
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | demand-backup-orc-2
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | minio-service-8967c7f7f-4dvnh
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | mysql-client
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | xb-demand-backup-minio-minio-7r48g
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | xb-restore-demand-backup-restore-minio-h4cqp'
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | + collect_logs kuttl-test-relative-reindeer
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | + local containers
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | + local count
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | + NS=kuttl-test-relative-reindeer
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | + for p in '$pods'
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod demand-backup-haproxy-0 -o 'jsonpath={.spec.containers[*].name}'
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | + containers='haproxy mysql-monit'
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:26 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-haproxy-0 -c haproxy
logger.go:42: 19:15:27 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-0-haproxy.txt
logger.go:42: 19:15:27 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-0-haproxy.txt
logger.go:42: 19:15:27 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:27 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-haproxy-0 -c mysql-monit
logger.go:42: 19:15:28 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-0-mysql-monit.txt
logger.go:42: 19:15:28 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-0-mysql-monit.txt
logger.go:42: 19:15:28 | demand-backup/9-check-password-leak | + echo
logger.go:42: 19:15:28 | demand-backup/9-check-password-leak |
logger.go:42: 19:15:28 | demand-backup/9-check-password-leak | + for p in '$pods'
logger.go:42: 19:15:28 | demand-backup/9-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod demand-backup-haproxy-1 -o 'jsonpath={.spec.containers[*].name}'
logger.go:42: 19:15:28 | demand-backup/9-check-password-leak | + containers='haproxy mysql-monit'
logger.go:42: 19:15:28 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:28 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-haproxy-1 -c haproxy
logger.go:42: 19:15:29 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-1-haproxy.txt
logger.go:42: 19:15:29 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-1-haproxy.txt
logger.go:42: 19:15:29 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:29 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-haproxy-1 -c mysql-monit
logger.go:42: 19:15:30 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-1-mysql-monit.txt
logger.go:42: 19:15:30 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-1-mysql-monit.txt
logger.go:42: 19:15:30 | demand-backup/9-check-password-leak | + echo
logger.go:42: 19:15:30 | demand-backup/9-check-password-leak |
logger.go:42: 19:15:30 | demand-backup/9-check-password-leak | + for p in '$pods'
logger.go:42: 19:15:30 | demand-backup/9-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod demand-backup-haproxy-2 -o 'jsonpath={.spec.containers[*].name}'
logger.go:42: 19:15:30 | demand-backup/9-check-password-leak | + containers='haproxy mysql-monit'
logger.go:42: 19:15:30 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:30 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-haproxy-2 -c haproxy
logger.go:42: 19:15:31 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-2-haproxy.txt
logger.go:42: 19:15:31 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-2-haproxy.txt
logger.go:42: 19:15:31 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:31 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-haproxy-2 -c mysql-monit
logger.go:42: 19:15:31 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-2-mysql-monit.txt
logger.go:42: 19:15:31 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-haproxy-2-mysql-monit.txt
logger.go:42: 19:15:31 | demand-backup/9-check-password-leak | + echo
logger.go:42: 19:15:31 | demand-backup/9-check-password-leak |
logger.go:42: 19:15:31 | demand-backup/9-check-password-leak | + for p in '$pods'
logger.go:42: 19:15:31 | demand-backup/9-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod demand-backup-mysql-0 -o 'jsonpath={.spec.containers[*].name}'
logger.go:42: 19:15:32 | demand-backup/9-check-password-leak | + containers='mysql xtrabackup pt-heartbeat'
logger.go:42: 19:15:32 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:32 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-mysql-0 -c mysql
logger.go:42: 19:15:33 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-0-mysql.txt
logger.go:42: 19:15:33 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-0-mysql.txt
logger.go:42: 19:15:33 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:33 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-mysql-0 -c xtrabackup
logger.go:42: 19:15:33 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-0-xtrabackup.txt
logger.go:42: 19:15:33 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-0-xtrabackup.txt
logger.go:42: 19:15:33 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:33 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-mysql-0 -c pt-heartbeat
logger.go:42: 19:15:34 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-0-pt-heartbeat.txt
logger.go:42: 19:15:34 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-0-pt-heartbeat.txt
logger.go:42: 19:15:34 | demand-backup/9-check-password-leak | + echo
logger.go:42: 19:15:34 | demand-backup/9-check-password-leak |
logger.go:42: 19:15:34 | demand-backup/9-check-password-leak | + for p in '$pods'
logger.go:42: 19:15:34 | demand-backup/9-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod demand-backup-mysql-1 -o 'jsonpath={.spec.containers[*].name}'
logger.go:42: 19:15:35 | demand-backup/9-check-password-leak | + containers='mysql xtrabackup pt-heartbeat'
logger.go:42: 19:15:35 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:35 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-mysql-1 -c mysql
logger.go:42: 19:15:35 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-1-mysql.txt
logger.go:42: 19:15:35 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-1-mysql.txt
logger.go:42: 19:15:35 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:35 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-mysql-1 -c xtrabackup
logger.go:42: 19:15:36 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-1-xtrabackup.txt
logger.go:42: 19:15:36 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-1-xtrabackup.txt
logger.go:42: 19:15:36 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:36 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-mysql-1 -c pt-heartbeat
logger.go:42: 19:15:36 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-1-pt-heartbeat.txt
logger.go:42: 19:15:36 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-1-pt-heartbeat.txt
logger.go:42: 19:15:36 | demand-backup/9-check-password-leak | + echo
logger.go:42: 19:15:36 | demand-backup/9-check-password-leak |
logger.go:42: 19:15:36 | demand-backup/9-check-password-leak | + for p in '$pods'
logger.go:42: 19:15:36 | demand-backup/9-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod demand-backup-mysql-2 -o 'jsonpath={.spec.containers[*].name}'
logger.go:42: 19:15:37 | demand-backup/9-check-password-leak | + containers='mysql xtrabackup pt-heartbeat'
logger.go:42: 19:15:37 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:37 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-mysql-2 -c mysql
logger.go:42: 19:15:37 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-2-mysql.txt
logger.go:42: 19:15:37 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-2-mysql.txt
logger.go:42: 19:15:37 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:37 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-mysql-2 -c xtrabackup
logger.go:42: 19:15:38 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-2-xtrabackup.txt
logger.go:42: 19:15:38 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-2-xtrabackup.txt
logger.go:42: 19:15:38 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:38 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-mysql-2 -c pt-heartbeat
logger.go:42: 19:15:39 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-2-pt-heartbeat.txt
logger.go:42: 19:15:39 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-mysql-2-pt-heartbeat.txt
logger.go:42: 19:15:39 | demand-backup/9-check-password-leak | + echo
logger.go:42: 19:15:39 | demand-backup/9-check-password-leak |
logger.go:42: 19:15:39 | demand-backup/9-check-password-leak | + for p in '$pods'
logger.go:42: 19:15:39 | demand-backup/9-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod demand-backup-orc-0 -o 'jsonpath={.spec.containers[*].name}'
logger.go:42: 19:15:39 | demand-backup/9-check-password-leak | + containers='orc mysql-monit'
logger.go:42: 19:15:39 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:39 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-orc-0 -c orc
logger.go:42: 19:15:40 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-0-orc.txt
logger.go:42: 19:15:40 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-0-orc.txt
logger.go:42: 19:15:40 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:40 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-orc-0 -c mysql-monit
logger.go:42: 19:15:40 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-0-mysql-monit.txt
logger.go:42: 19:15:40 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-0-mysql-monit.txt
logger.go:42: 19:15:40 | demand-backup/9-check-password-leak | + echo
logger.go:42: 19:15:40 | demand-backup/9-check-password-leak |
logger.go:42: 19:15:40 | demand-backup/9-check-password-leak | + for p in '$pods'
logger.go:42: 19:15:40 | demand-backup/9-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod demand-backup-orc-1 -o 'jsonpath={.spec.containers[*].name}'
logger.go:42: 19:15:41 | demand-backup/9-check-password-leak | + containers='orc mysql-monit'
logger.go:42: 19:15:41 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:41 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-orc-1 -c orc
logger.go:42: 19:15:42 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-1-orc.txt
logger.go:42: 19:15:42 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-1-orc.txt
logger.go:42: 19:15:42 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:42 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-orc-1 -c mysql-monit
logger.go:42: 19:15:42 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-1-mysql-monit.txt
logger.go:42: 19:15:42 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-1-mysql-monit.txt
logger.go:42: 19:15:42 | demand-backup/9-check-password-leak | + echo
logger.go:42: 19:15:42 | demand-backup/9-check-password-leak |
logger.go:42: 19:15:42 | demand-backup/9-check-password-leak | + for p in '$pods'
logger.go:42: 19:15:42 | demand-backup/9-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod demand-backup-orc-2 -o 'jsonpath={.spec.containers[*].name}'
logger.go:42: 19:15:43 | demand-backup/9-check-password-leak | + containers='orc mysql-monit'
logger.go:42: 19:15:43 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:43 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-orc-2 -c orc
logger.go:42: 19:15:43 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-2-orc.txt
logger.go:42: 19:15:43 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-2-orc.txt
logger.go:42: 19:15:43 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:43 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs demand-backup-orc-2 -c mysql-monit
logger.go:42: 19:15:44 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-2-mysql-monit.txt
logger.go:42: 19:15:44 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-demand-backup-orc-2-mysql-monit.txt
logger.go:42: 19:15:44 | demand-backup/9-check-password-leak | + echo
logger.go:42: 19:15:44 | demand-backup/9-check-password-leak |
logger.go:42: 19:15:44 | demand-backup/9-check-password-leak | + for p in '$pods'
logger.go:42: 19:15:44 | demand-backup/9-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod minio-service-8967c7f7f-4dvnh -o 'jsonpath={.spec.containers[*].name}'
logger.go:42: 19:15:44 | demand-backup/9-check-password-leak | + containers=minio
logger.go:42: 19:15:44 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:44 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs minio-service-8967c7f7f-4dvnh -c minio
logger.go:42: 19:15:45 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-minio-service-8967c7f7f-4dvnh-minio.txt
logger.go:42: 19:15:45 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-minio-service-8967c7f7f-4dvnh-minio.txt
logger.go:42: 19:15:45 | demand-backup/9-check-password-leak | + echo
logger.go:42: 19:15:45 | demand-backup/9-check-password-leak |
logger.go:42: 19:15:45 | demand-backup/9-check-password-leak | + for p in '$pods'
logger.go:42: 19:15:45 | demand-backup/9-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod mysql-client -o 'jsonpath={.spec.containers[*].name}'
logger.go:42: 19:15:45 | demand-backup/9-check-password-leak | + containers=mysql-client
logger.go:42: 19:15:45 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:45 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs mysql-client -c mysql-client
logger.go:42: 19:15:46 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-mysql-client-mysql-client.txt
logger.go:42: 19:15:46 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-mysql-client-mysql-client.txt
logger.go:42: 19:15:46 | demand-backup/9-check-password-leak | + echo
logger.go:42: 19:15:46 | demand-backup/9-check-password-leak |
logger.go:42: 19:15:46 | demand-backup/9-check-password-leak | + for p in '$pods'
logger.go:42: 19:15:46 | demand-backup/9-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod xb-demand-backup-minio-minio-7r48g -o 'jsonpath={.spec.containers[*].name}'
logger.go:42: 19:15:46 | demand-backup/9-check-password-leak | + containers=xtrabackup
logger.go:42: 19:15:46 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:46 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs xb-demand-backup-minio-minio-7r48g -c xtrabackup
logger.go:42: 19:15:47 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-xb-demand-backup-minio-minio-7r48g-xtrabackup.txt
logger.go:42: 19:15:47 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-xb-demand-backup-minio-minio-7r48g-xtrabackup.txt
logger.go:42: 19:15:47 | demand-backup/9-check-password-leak | + echo
logger.go:42: 19:15:47 | demand-backup/9-check-password-leak |
logger.go:42: 19:15:47 | demand-backup/9-check-password-leak | + for p in '$pods'
logger.go:42: 19:15:47 | demand-backup/9-check-password-leak | ++ kubectl -n kuttl-test-relative-reindeer get pod xb-restore-demand-backup-restore-minio-h4cqp -o 'jsonpath={.spec.containers[*].name}'
logger.go:42: 19:15:47 | demand-backup/9-check-password-leak | + containers=xtrabackup
logger.go:42: 19:15:47 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:47 | demand-backup/9-check-password-leak | + kubectl -n kuttl-test-relative-reindeer logs xb-restore-demand-backup-restore-minio-h4cqp -c xtrabackup
logger.go:42: 19:15:48 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-xb-restore-demand-backup-restore-minio-h4cqp-xtrabackup.txt
logger.go:42: 19:15:48 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-xb-restore-demand-backup-restore-minio-h4cqp-xtrabackup.txt
logger.go:42: 19:15:48 | demand-backup/9-check-password-leak | + echo
logger.go:42: 19:15:48 | demand-backup/9-check-password-leak |
logger.go:42: 19:15:48 | demand-backup/9-check-password-leak | + '[' -n ps-operator ']'
logger.go:42: 19:15:48 | demand-backup/9-check-password-leak | ++ kubectl -n ps-operator get pods -o name
logger.go:42: 19:15:48 | demand-backup/9-check-password-leak | ++ awk -F / '{print $2}'
logger.go:42: 19:15:49 | demand-backup/9-check-password-leak | + pods=percona-server-mysql-operator-bf7db58fb-gkgbj
logger.go:42: 19:15:49 | demand-backup/9-check-password-leak | + collect_logs ps-operator
logger.go:42: 19:15:49 | demand-backup/9-check-password-leak | + local containers
logger.go:42: 19:15:49 | demand-backup/9-check-password-leak | + local count
logger.go:42: 19:15:49 | demand-backup/9-check-password-leak | + NS=ps-operator
logger.go:42: 19:15:49 | demand-backup/9-check-password-leak | + for p in '$pods'
logger.go:42: 19:15:49 | demand-backup/9-check-password-leak | ++ kubectl -n ps-operator get pod percona-server-mysql-operator-bf7db58fb-gkgbj -o 'jsonpath={.spec.containers[*].name}'
logger.go:42: 19:15:49 | demand-backup/9-check-password-leak | + containers=manager
logger.go:42: 19:15:49 | demand-backup/9-check-password-leak | + for c in '$containers'
logger.go:42: 19:15:49 | demand-backup/9-check-password-leak | + kubectl -n ps-operator logs percona-server-mysql-operator-bf7db58fb-gkgbj -c manager
logger.go:42: 19:15:50 | demand-backup/9-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-percona-server-mysql-operator-bf7db58fb-gkgbj-manager.txt
logger.go:42: 19:15:50 | demand-backup/9-check-password-leak | logs saved in: /tmp/kuttl/ps/demand-backup/logs_output-percona-server-mysql-operator-bf7db58fb-gkgbj-manager.txt
logger.go:42: 19:15:50 | demand-backup/9-check-password-leak | + echo
logger.go:42: 19:15:50 | demand-backup/9-check-password-leak |
logger.go:42: 19:15:50 | demand-backup/9-check-password-leak | test step completed 9-check-password-leak
logger.go:42: 19:15:50 | demand-backup/10-read-data | starting test step 10-read-data
logger.go:42: 19:15:50 | demand-backup/10-read-data | running command: [sh -c set -o errexit
    set -o xtrace
    source ../../functions
    cluster_name=$(get_cluster_name)
    for i in 0 1 2; do
        data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql -uroot -proot_password")
        kubectl create configmap -n "${NAMESPACE}" 06-read-data-minio-${i} --from-literal=data="${data}"
    done]
logger.go:42: 19:15:50 | demand-backup/10-read-data | + source ../../functions
logger.go:42: 19:15:50 | demand-backup/10-read-data | +++ realpath ../../..
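The run_mysql helper that drives every data check in this test can be read straight off the xtrace: it locates the long-lived mysql-client pod, waits for it, then pipes the statement into the mysql CLI inside that pod and filters the insecure-password warning out of the output. A minimal reconstruction (exact quoting in the real helper may differ; the '++ :' entries in the trace suggest an '|| :' guard so that empty result sets do not trip errexit):

# sketch reconstructed from the xtrace; not the verbatim e2e-tests/functions source
run_mysql() {
    local command="$1"
    local uri="$2"
    local pod=
    client_pod=$(get_client_pod)   # kubectl get pods --selector=name=mysql-client ...
    wait_pod "${client_pod}"
    kubectl -n "${NAMESPACE}" exec "${client_pod}" -- \
        bash -c "printf '%s\n' \"${command}\" | mysql -sN ${uri}" \
        | sed -e 's/mysql: //' \
        | grep -v 'Using a password on the command line interface can be insecure.' \
        || :
}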
logger.go:42: 19:15:50 | demand-backup/10-read-data | +++ (vars.sh environment setup trace identical to step 1-deploy-operator omitted)
logger.go:42: 19:15:50 | demand-backup/10-read-data | ++ get_cluster_name
logger.go:42: 19:15:50 | demand-backup/10-read-data | ++ kubectl -n kuttl-test-relative-reindeer get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 19:15:51 | demand-backup/10-read-data | + cluster_name=demand-backup
logger.go:42: 19:15:51 | demand-backup/10-read-data | + for i in 0 1 2
logger.go:42: 19:15:51 | demand-backup/10-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-0.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:15:51 | demand-backup/10-read-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 19:15:51 | demand-backup/10-read-data | ++ local 'uri=-h demand-backup-mysql-0.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:15:51 | demand-backup/10-read-data | ++ local pod=
logger.go:42: 19:15:51 | demand-backup/10-read-data | +++ get_client_pod
logger.go:42: 19:15:51 | demand-backup/10-read-data | +++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:15:51 | demand-backup/10-read-data | ++ client_pod=mysql-client
logger.go:42: 19:15:51 | demand-backup/10-read-data | ++ wait_pod mysql-client
logger.go:42: 19:15:51 | demand-backup/10-read-data | ++ local pod=mysql-client
logger.go:42: 19:15:51 | demand-backup/10-read-data | ++ set +o xtrace
logger.go:42: 19:15:52 | demand-backup/10-read-data | mysql-clienttrue
logger.go:42: 19:15:52 | demand-backup/10-read-data | ++ kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-0.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:15:52 | demand-backup/10-read-data | ++ sed -e 's/mysql: //'
logger.go:42: 19:15:52 | demand-backup/10-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:15:52 | demand-backup/10-read-data | + data=100500
logger.go:42: 19:15:52 | demand-backup/10-read-data | + kubectl create configmap -n kuttl-test-relative-reindeer 06-read-data-minio-0 --from-literal=data=100500
logger.go:42: 19:15:53 | demand-backup/10-read-data | configmap/06-read-data-minio-0 created
logger.go:42: 19:15:53 | demand-backup/10-read-data | + for i in 0 1 2
logger.go:42: 19:15:53 | demand-backup/10-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-1.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:15:53 | demand-backup/10-read-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 19:15:53 | demand-backup/10-read-data | ++ local 'uri=-h demand-backup-mysql-1.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:15:53 | demand-backup/10-read-data | ++ local pod=
logger.go:42: 19:15:53 | demand-backup/10-read-data | +++ get_client_pod
logger.go:42: 19:15:53 | demand-backup/10-read-data | +++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:15:53 | demand-backup/10-read-data | ++ client_pod=mysql-client
logger.go:42: 19:15:53 | demand-backup/10-read-data | ++ wait_pod mysql-client
logger.go:42: 19:15:53 | demand-backup/10-read-data | ++ local pod=mysql-client
logger.go:42: 19:15:53 | demand-backup/10-read-data | ++ set +o xtrace
logger.go:42: 19:15:54 | demand-backup/10-read-data | mysql-clienttrue
logger.go:42: 19:15:54 | demand-backup/10-read-data | ++ kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-1.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:15:54 | demand-backup/10-read-data | ++ sed -e 's/mysql: //'
logger.go:42: 19:15:54 | demand-backup/10-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:15:54 | demand-backup/10-read-data | + data=100500
logger.go:42: 19:15:54 | demand-backup/10-read-data | + kubectl create configmap -n kuttl-test-relative-reindeer 06-read-data-minio-1 --from-literal=data=100500
logger.go:42: 19:15:55 | demand-backup/10-read-data | configmap/06-read-data-minio-1 created
logger.go:42: 19:15:55 | demand-backup/10-read-data | + for i in 0 1 2
logger.go:42: 19:15:55 | demand-backup/10-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-2.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:15:55 | demand-backup/10-read-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 19:15:55 | demand-backup/10-read-data | ++ local 'uri=-h demand-backup-mysql-2.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:15:55 | demand-backup/10-read-data | ++ local pod=
logger.go:42: 19:15:55 | demand-backup/10-read-data | +++ get_client_pod
logger.go:42: 19:15:55 | demand-backup/10-read-data | +++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:15:55 | demand-backup/10-read-data | ++ client_pod=mysql-client
logger.go:42: 19:15:55 | demand-backup/10-read-data | ++ wait_pod mysql-client
logger.go:42: 19:15:55 | demand-backup/10-read-data | ++ local pod=mysql-client
logger.go:42: 19:15:55 | demand-backup/10-read-data | ++ set +o xtrace
logger.go:42: 19:15:56 | demand-backup/10-read-data | mysql-clienttrue
logger.go:42: 19:15:56 | demand-backup/10-read-data | ++ sed -e 's/mysql: //'
logger.go:42: 19:15:56 | demand-backup/10-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:15:56 | demand-backup/10-read-data | ++ kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-2.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:15:56 | demand-backup/10-read-data | + data=100500
logger.go:42: 19:15:56 | demand-backup/10-read-data | + kubectl create configmap -n kuttl-test-relative-reindeer 06-read-data-minio-2 --from-literal=data=100500
logger.go:42: 19:15:57 | demand-backup/10-read-data | configmap/06-read-data-minio-2 created
logger.go:42: 19:15:58 | demand-backup/10-read-data | test step completed 10-read-data
logger.go:42: 19:15:58 | demand-backup/11-delete-data | starting test step 11-delete-data
logger.go:42: 19:15:58 | demand-backup/11-delete-data | running command: [sh -c set -o errexit
    set -o xtrace
    source ../../functions
    run_mysql \
        "TRUNCATE TABLE myDB.myTable" \
        "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password"
    cluster_name=$(get_cluster_name)
    for i in 0 1 2; do
        data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql -uroot -proot_password")
        kubectl create configmap -n "${NAMESPACE}" 04-delete-data-minio-backup-source-${i} --from-literal=data="${data}"
    done]
logger.go:42: 19:15:58 | demand-backup/11-delete-data | + source ../../functions
logger.go:42: 19:15:58 | demand-backup/11-delete-data | +++ realpath ../../..
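Each delete/read step records what each replica returned into a step-numbered ConfigMap, which the kuttl harness then compares against the step's assert files. The same check can be made by hand; for example, after the TRUNCATE in this step the stored value should be empty (illustrative command, not part of the test run):

kubectl -n kuttl-test-relative-reindeer get configmap 04-delete-data-minio-backup-source-0 \
    -o jsonpath='{.data.data}'   # prints nothing once the table has been truncated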
logger.go:42: 19:15:58 | demand-backup/11-delete-data | +++ (vars.sh environment setup trace identical to step 1-deploy-operator omitted)
logger.go:42: 19:15:58 | demand-backup/11-delete-data | +++ get_cluster_name
logger.go:42: 19:15:58 | demand-backup/11-delete-data | +++ kubectl -n kuttl-test-relative-reindeer get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 19:15:59 | demand-backup/11-delete-data | ++ get_haproxy_svc demand-backup
logger.go:42: 19:15:59 | demand-backup/11-delete-data | ++ local cluster=demand-backup
logger.go:42: 19:15:59 | demand-backup/11-delete-data | ++ echo demand-backup-haproxy
logger.go:42: 19:15:59 | demand-backup/11-delete-data | + run_mysql 'TRUNCATE TABLE myDB.myTable' '-h demand-backup-haproxy -uroot -proot_password'
logger.go:42: 19:15:59 | demand-backup/11-delete-data | + local 'command=TRUNCATE TABLE myDB.myTable'
logger.go:42: 19:15:59 | demand-backup/11-delete-data | + local 'uri=-h demand-backup-haproxy -uroot -proot_password'
logger.go:42: 19:15:59 | demand-backup/11-delete-data | + local pod=
logger.go:42: 19:15:59 | demand-backup/11-delete-data | ++ get_client_pod
logger.go:42: 19:15:59 | demand-backup/11-delete-data | ++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:15:59 | demand-backup/11-delete-data | + client_pod=mysql-client
logger.go:42: 19:15:59 | demand-backup/11-delete-data | + wait_pod mysql-client
logger.go:42: 19:15:59 | demand-backup/11-delete-data | + local pod=mysql-client
logger.go:42: 19:15:59 | demand-backup/11-delete-data | + set +o xtrace
logger.go:42: 19:15:59 | demand-backup/11-delete-data | mysql-clienttrue
logger.go:42: 19:15:59 | demand-backup/11-delete-data | + kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "TRUNCATE TABLE myDB.myTable" | mysql -sN -h demand-backup-haproxy -uroot -proot_password'
logger.go:42: 19:15:59 | demand-backup/11-delete-data | + sed -e 's/mysql: //'
logger.go:42: 19:15:59 | demand-backup/11-delete-data | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:16:00 | demand-backup/11-delete-data | + :
logger.go:42: 19:16:00 | demand-backup/11-delete-data | ++ get_cluster_name
logger.go:42: 19:16:00 | demand-backup/11-delete-data | ++ kubectl -n kuttl-test-relative-reindeer get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 19:16:01 | demand-backup/11-delete-data | + cluster_name=demand-backup
logger.go:42: 19:16:01 | demand-backup/11-delete-data | + for i in 0 1 2
logger.go:42: 19:16:01 | demand-backup/11-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-0.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:16:01 | demand-backup/11-delete-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 19:16:01 | demand-backup/11-delete-data | ++ local 'uri=-h demand-backup-mysql-0.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:16:01 | demand-backup/11-delete-data | ++ local pod=
logger.go:42: 19:16:01 | demand-backup/11-delete-data | +++ get_client_pod
logger.go:42: 19:16:01 | demand-backup/11-delete-data | +++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:16:01 | demand-backup/11-delete-data | ++ client_pod=mysql-client
logger.go:42: 19:16:01 | demand-backup/11-delete-data | ++ wait_pod mysql-client
logger.go:42: 19:16:01 | demand-backup/11-delete-data | ++ local pod=mysql-client
logger.go:42: 19:16:01 | demand-backup/11-delete-data | ++ set +o xtrace
logger.go:42: 19:16:01 | demand-backup/11-delete-data | mysql-clienttrue
logger.go:42: 19:16:01 | demand-backup/11-delete-data | ++ kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-0.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:16:01 | demand-backup/11-delete-data | ++ sed -e 's/mysql: //'
logger.go:42: 19:16:01 | demand-backup/11-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:16:02 | demand-backup/11-delete-data | ++ :
logger.go:42: 19:16:02 | demand-backup/11-delete-data | + data=
logger.go:42: 19:16:02 | demand-backup/11-delete-data | + kubectl create configmap -n kuttl-test-relative-reindeer 04-delete-data-minio-backup-source-0 --from-literal=data=
logger.go:42: 19:16:03 | demand-backup/11-delete-data | configmap/04-delete-data-minio-backup-source-0 created
logger.go:42: 19:16:03 | demand-backup/11-delete-data | + for i in 0 1 2
logger.go:42: 19:16:03 | demand-backup/11-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-1.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:16:03 | demand-backup/11-delete-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 19:16:03 | demand-backup/11-delete-data | ++ local 'uri=-h demand-backup-mysql-1.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:16:03 | demand-backup/11-delete-data | ++ local pod=
logger.go:42: 19:16:03 | demand-backup/11-delete-data | +++ get_client_pod
logger.go:42: 19:16:03 | demand-backup/11-delete-data | +++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:16:03 | demand-backup/11-delete-data | ++ client_pod=mysql-client
logger.go:42: 19:16:03 | demand-backup/11-delete-data | ++ wait_pod mysql-client
logger.go:42: 19:16:03 | demand-backup/11-delete-data | ++ local pod=mysql-client
logger.go:42: 19:16:03 | demand-backup/11-delete-data | ++ set +o xtrace
logger.go:42: 19:16:04 | demand-backup/11-delete-data | mysql-clienttrue
logger.go:42: 19:16:04 | demand-backup/11-delete-data | ++ sed -e 's/mysql: //'
logger.go:42: 19:16:04 | demand-backup/11-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:16:04 | demand-backup/11-delete-data | ++ kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-1.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:16:05 | demand-backup/11-delete-data | ++ :
logger.go:42: 19:16:05 | demand-backup/11-delete-data | + data=
logger.go:42: 19:16:05 | demand-backup/11-delete-data | + kubectl create configmap -n kuttl-test-relative-reindeer 04-delete-data-minio-backup-source-1 --from-literal=data=
logger.go:42: 19:16:05 | demand-backup/11-delete-data | configmap/04-delete-data-minio-backup-source-1 created
logger.go:42: 19:16:05 | demand-backup/11-delete-data | + for i in 0 1 2
logger.go:42: 19:16:05 | demand-backup/11-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-2.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:16:05 | demand-backup/11-delete-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 19:16:05 | demand-backup/11-delete-data | ++ local 'uri=-h demand-backup-mysql-2.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:16:05 | demand-backup/11-delete-data | ++ local pod=
logger.go:42: 19:16:05 | demand-backup/11-delete-data | +++ get_client_pod
logger.go:42: 19:16:05 | demand-backup/11-delete-data | +++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:16:05 | demand-backup/11-delete-data | ++ client_pod=mysql-client
logger.go:42: 19:16:05 | demand-backup/11-delete-data | ++ wait_pod mysql-client
logger.go:42: 19:16:05 | demand-backup/11-delete-data | ++ local pod=mysql-client
logger.go:42: 19:16:05 | demand-backup/11-delete-data | ++ set +o xtrace
logger.go:42: 19:16:06 | demand-backup/11-delete-data | mysql-clienttrue
logger.go:42: 19:16:06 | demand-backup/11-delete-data | ++ kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-2.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:16:06 | demand-backup/11-delete-data | ++ sed -e 's/mysql: //'
logger.go:42: 19:16:06 | demand-backup/11-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:16:07 | demand-backup/11-delete-data | ++ :
logger.go:42: 19:16:07 | demand-backup/11-delete-data | + data=
logger.go:42: 19:16:07 | demand-backup/11-delete-data | + kubectl create configmap -n kuttl-test-relative-reindeer 04-delete-data-minio-backup-source-2 --from-literal=data=
logger.go:42: 19:16:07 | demand-backup/11-delete-data | configmap/04-delete-data-minio-backup-source-2 created
logger.go:42: 19:16:08 | demand-backup/11-delete-data | test step completed 11-delete-data
logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | starting test step 12-restore-from-minio-backup-source
logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | running command: [sh -c set -o errexit
    set -o xtrace
    source ../../functions
    storage_name="minio"
    backup_name="demand-backup-minio"
    restore_name="demand-backup-restore-minio-backup-source"
    cluster_name="${test_name}${name_suffix:+-$name_suffix}"
    destination=$(kubectl -n "${NAMESPACE}" get ps-backup "${backup_name}" -o jsonpath='{.status.destination}')
    cat "${DEPLOY_DIR}/restore.yaml" \
        | yq eval "$(printf '.metadata.name="%s"' "${restore_name}")" - \
        | yq eval "$(printf '.spec.clusterName="%s"' "${cluster_name}")" - \
        | yq eval "del(.spec.backupName)" - \
        | yq eval "$(printf '.spec.backupSource.destination="%s"' "${destination}")" - \
        | yq eval '.spec.backupSource.storage.type="s3"' - \
        | yq eval '.spec.backupSource.storage.s3.bucket="operator-testing"' - \
        | yq eval '.spec.backupSource.storage.s3.credentialsSecret="minio-secret"' - \
        | yq eval "$(printf '.spec.backupSource.storage.s3.endpointUrl="http://minio-service.%s:9000"' "${NAMESPACE}")" - \
        | yq eval '.spec.backupSource.storage.s3.region="us-east-1"' - \
        | kubectl apply -n "${NAMESPACE}" -f -]
logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | + source ../../functions
logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ realpath ../../..
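The yq pipeline above is easier to read as the manifest it ultimately feeds to kubectl apply. The following reconstruction uses the substitutions visible in the trace; the apiVersion is an assumption (only the group ps.percona.com appears in this log), and deploy/restore.yaml may contribute further fields that the pipeline does not override:

kubectl apply -n kuttl-test-relative-reindeer -f - <<EOF
apiVersion: ps.percona.com/v1alpha1   # version suffix assumed
kind: PerconaServerMySQLRestore
metadata:
  name: demand-backup-restore-minio-backup-source
spec:
  clusterName: demand-backup
  backupSource:
    destination: s3://operator-testing/demand-backup-2025-05-12-19:09:30-full
    storage:
      type: s3
      s3:
        bucket: operator-testing
        credentialsSecret: minio-secret
        endpointUrl: http://minio-service.kuttl-test-relative-reindeer:9000
        region: us-east-1
EOF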
logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893 logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | ++++ pwd logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/tests/demand-backup logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | ++ test_name=demand-backup logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/vars.sh logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893 logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893 logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ export GIT_BRANCH=PR-893 logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ GIT_BRANCH=PR-893 logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ export VERSION=PR-893-8b3e0608 logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ VERSION=PR-893-8b3e0608 logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608 logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608 logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup 
logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | ++++ which gdate logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-893/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | ++++ which date logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ date=/usr/bin/date logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ oc get projects logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ : logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ kubectl get nodes logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | +++ grep '^minikube' logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | ++ oc get projects logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | + storage_name=minio logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | + backup_name=demand-backup-minio logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | + restore_name=demand-backup-restore-minio-backup-source logger.go:42: 19:16:08 | 
demand-backup/12-restore-from-minio-backup-source | + cluster_name=demand-backup logger.go:42: 19:16:08 | demand-backup/12-restore-from-minio-backup-source | ++ kubectl -n kuttl-test-relative-reindeer get ps-backup demand-backup-minio -o 'jsonpath={.status.destination}' logger.go:42: 19:16:09 | demand-backup/12-restore-from-minio-backup-source | + destination=s3://operator-testing/demand-backup-2025-05-12-19:09:30-full logger.go:42: 19:16:09 | demand-backup/12-restore-from-minio-backup-source | + cat /mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy/restore.yaml logger.go:42: 19:16:09 | demand-backup/12-restore-from-minio-backup-source | + yq eval 'del(.spec.backupName)' - logger.go:42: 19:16:09 | demand-backup/12-restore-from-minio-backup-source | ++ printf '.spec.clusterName="%s"' demand-backup logger.go:42: 19:16:09 | demand-backup/12-restore-from-minio-backup-source | + yq eval '.spec.clusterName="demand-backup"' - logger.go:42: 19:16:09 | demand-backup/12-restore-from-minio-backup-source | + yq eval '.spec.backupSource.storage.s3.credentialsSecret="minio-secret"' - logger.go:42: 19:16:09 | demand-backup/12-restore-from-minio-backup-source | + yq eval '.spec.backupSource.storage.s3.bucket="operator-testing"' - logger.go:42: 19:16:09 | demand-backup/12-restore-from-minio-backup-source | ++ printf '.metadata.name="%s"' demand-backup-restore-minio-backup-source logger.go:42: 19:16:09 | demand-backup/12-restore-from-minio-backup-source | + yq eval '.metadata.name="demand-backup-restore-minio-backup-source"' - logger.go:42: 19:16:09 | demand-backup/12-restore-from-minio-backup-source | + yq eval '.spec.backupSource.storage.s3.region="us-east-1"' - logger.go:42: 19:16:09 | demand-backup/12-restore-from-minio-backup-source | + kubectl apply -n kuttl-test-relative-reindeer -f - logger.go:42: 19:16:09 | demand-backup/12-restore-from-minio-backup-source | ++ printf '.spec.backupSource.destination="%s"' s3://operator-testing/demand-backup-2025-05-12-19:09:30-full logger.go:42: 19:16:09 | demand-backup/12-restore-from-minio-backup-source | + yq eval '.spec.backupSource.storage.type="s3"' - logger.go:42: 19:16:09 | demand-backup/12-restore-from-minio-backup-source | + yq eval '.spec.backupSource.destination="s3://operator-testing/demand-backup-2025-05-12-19:09:30-full"' - logger.go:42: 19:16:09 | demand-backup/12-restore-from-minio-backup-source | ++ printf '.spec.backupSource.storage.s3.endpointUrl="http://minio-service.%s:9000"' kuttl-test-relative-reindeer logger.go:42: 19:16:09 | demand-backup/12-restore-from-minio-backup-source | + yq eval '.spec.backupSource.storage.s3.endpointUrl="http://minio-service.kuttl-test-relative-reindeer:9000"' - logger.go:42: 19:16:10 | demand-backup/12-restore-from-minio-backup-source | perconaservermysqlrestore.ps.percona.com/demand-backup-restore-minio-backup-source created logger.go:42: 19:21:20 | demand-backup/12-restore-from-minio-backup-source | test step completed 12-restore-from-minio-backup-source logger.go:42: 19:21:20 | demand-backup/13-read-data | starting test step 13-read-data logger.go:42: 19:21:20 | demand-backup/13-read-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions cluster_name=$(get_cluster_name) for i in 0 1 2; do data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql -uroot -proot_password") kubectl create configmap -n "${NAMESPACE}" 09-read-data-minio-backup-source-${i} --from-literal=data="${data}" done] logger.go:42: 19:21:20 | demand-backup/13-read-data | 
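The step-12 pipeline is worth unpacking: it takes the stock deploy/restore.yaml, strips spec.backupName, and grafts on an explicit spec.backupSource, so the restore is driven by a raw S3 location rather than by a reference to a PerconaServerMySQLBackup object. Reassembled from the yq edits and the values in the xtrace above, the applied object looks roughly like this sketch (the apiVersion is an assumption inferred from the ps.percona.com group in the log; the real deploy/restore.yaml may carry additional fields):

    kubectl apply -n kuttl-test-relative-reindeer -f - <<'EOF'
    apiVersion: ps.percona.com/v1alpha1   # assumed; check deploy/restore.yaml
    kind: PerconaServerMySQLRestore
    metadata:
      name: demand-backup-restore-minio-backup-source
    spec:
      clusterName: demand-backup
      backupSource:
        destination: s3://operator-testing/demand-backup-2025-05-12-19:09:30-full
        storage:
          type: s3
          s3:
            bucket: operator-testing
            credentialsSecret: minio-secret
            endpointUrl: http://minio-service.kuttl-test-relative-reindeer:9000
            region: us-east-1
    EOF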
logger.go:42: 19:21:20 | demand-backup/13-read-data | starting test step 13-read-data
logger.go:42: 19:21:20 | demand-backup/13-read-data | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
cluster_name=$(get_cluster_name)
for i in 0 1 2; do
  data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql -uroot -proot_password")
  kubectl create configmap -n "${NAMESPACE}" 09-read-data-minio-backup-source-${i} --from-literal=data="${data}"
done]
logger.go:42: 19:21:20 | demand-backup/13-read-data | + source ../../functions
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ realpath ../../..
logger.go:42: 19:21:20 | demand-backup/13-read-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893
logger.go:42: 19:21:20 | demand-backup/13-read-data | ++++ pwd
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/tests/demand-backup
logger.go:42: 19:21:20 | demand-backup/13-read-data | ++ test_name=demand-backup
logger.go:42: 19:21:20 | demand-backup/13-read-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/vars.sh
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup
logger.go:42: 19:21:20 | demand-backup/13-read-data | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ export GIT_BRANCH=PR-893
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ GIT_BRANCH=PR-893
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ export VERSION=PR-893-8b3e0608
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ VERSION=PR-893-8b3e0608
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ export CERT_MANAGER_VER=1.16.3
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ CERT_MANAGER_VER=1.16.3
logger.go:42: 19:21:20 | demand-backup/13-read-data | ++++ which gdate
logger.go:42: 19:21:20 | demand-backup/13-read-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-893/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 19:21:20 | demand-backup/13-read-data | ++++ which date
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ date=/usr/bin/date
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ oc get projects
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ :
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ kubectl get nodes
logger.go:42: 19:21:20 | demand-backup/13-read-data | +++ grep '^minikube'
logger.go:42: 19:21:21 | demand-backup/13-read-data | ++ oc get projects
logger.go:42: 19:21:21 | demand-backup/13-read-data | ++ get_cluster_name
logger.go:42: 19:21:21 | demand-backup/13-read-data | ++ kubectl -n kuttl-test-relative-reindeer get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 19:21:21 | demand-backup/13-read-data | + cluster_name=demand-backup
logger.go:42: 19:21:21 | demand-backup/13-read-data | + for i in 0 1 2
logger.go:42: 19:21:21 | demand-backup/13-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-0.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:21:21 | demand-backup/13-read-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 19:21:21 | demand-backup/13-read-data | ++ local 'uri=-h demand-backup-mysql-0.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:21:21 | demand-backup/13-read-data | ++ local pod=
logger.go:42: 19:21:21 | demand-backup/13-read-data | +++ get_client_pod
logger.go:42: 19:21:21 | demand-backup/13-read-data | +++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:21:22 | demand-backup/13-read-data | ++ client_pod=mysql-client
logger.go:42: 19:21:22 | demand-backup/13-read-data | ++ wait_pod mysql-client
logger.go:42: 19:21:22 | demand-backup/13-read-data | ++ local pod=mysql-client
logger.go:42: 19:21:22 | demand-backup/13-read-data | ++ set +o xtrace
logger.go:42: 19:21:22 | demand-backup/13-read-data | mysql-clienttrue
logger.go:42: 19:21:22 | demand-backup/13-read-data | ++ kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-0.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:21:22 | demand-backup/13-read-data | ++ sed -e 's/mysql: //'
logger.go:42: 19:21:22 | demand-backup/13-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:21:23 | demand-backup/13-read-data | + data=100500
logger.go:42: 19:21:23 | demand-backup/13-read-data | + kubectl create configmap -n kuttl-test-relative-reindeer 09-read-data-minio-backup-source-0 --from-literal=data=100500
logger.go:42: 19:21:23 | demand-backup/13-read-data | configmap/09-read-data-minio-backup-source-0 created
logger.go:42: 19:21:23 | demand-backup/13-read-data | + for i in 0 1 2
logger.go:42: 19:21:23 | demand-backup/13-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-1.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:21:23 | demand-backup/13-read-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 19:21:23 | demand-backup/13-read-data | ++ local 'uri=-h demand-backup-mysql-1.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:21:23 | demand-backup/13-read-data | ++ local pod=
logger.go:42: 19:21:23 | demand-backup/13-read-data | +++ get_client_pod
logger.go:42: 19:21:23 | demand-backup/13-read-data | +++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:21:24 | demand-backup/13-read-data | ++ client_pod=mysql-client
logger.go:42: 19:21:24 | demand-backup/13-read-data | ++ wait_pod mysql-client
logger.go:42: 19:21:24 | demand-backup/13-read-data | ++ local pod=mysql-client
logger.go:42: 19:21:24 | demand-backup/13-read-data | ++ set +o xtrace
logger.go:42: 19:21:24 | demand-backup/13-read-data | mysql-clienttrue
logger.go:42: 19:21:24 | demand-backup/13-read-data | ++ sed -e 's/mysql: //'
logger.go:42: 19:21:24 | demand-backup/13-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:21:24 | demand-backup/13-read-data | ++ kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-1.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:21:25 | demand-backup/13-read-data | + data=100500
logger.go:42: 19:21:25 | demand-backup/13-read-data | + kubectl create configmap -n kuttl-test-relative-reindeer 09-read-data-minio-backup-source-1 --from-literal=data=100500
logger.go:42: 19:21:25 | demand-backup/13-read-data | configmap/09-read-data-minio-backup-source-1 created
logger.go:42: 19:21:25 | demand-backup/13-read-data | + for i in 0 1 2
logger.go:42: 19:21:25 | demand-backup/13-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-2.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:21:25 | demand-backup/13-read-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 19:21:25 | demand-backup/13-read-data | ++ local 'uri=-h demand-backup-mysql-2.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:21:25 | demand-backup/13-read-data | ++ local pod=
logger.go:42: 19:21:25 | demand-backup/13-read-data | +++ get_client_pod
logger.go:42: 19:21:25 | demand-backup/13-read-data | +++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:21:26 | demand-backup/13-read-data | ++ client_pod=mysql-client
logger.go:42: 19:21:26 | demand-backup/13-read-data | ++ wait_pod mysql-client
logger.go:42: 19:21:26 | demand-backup/13-read-data | ++ local pod=mysql-client
logger.go:42: 19:21:26 | demand-backup/13-read-data | ++ set +o xtrace
logger.go:42: 19:21:26 | demand-backup/13-read-data | mysql-clienttrue
logger.go:42: 19:21:26 | demand-backup/13-read-data | ++ kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-2.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:21:26 | demand-backup/13-read-data | ++ sed -e 's/mysql: //'
logger.go:42: 19:21:26 | demand-backup/13-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:21:27 | demand-backup/13-read-data | + data=100500
logger.go:42: 19:21:27 | demand-backup/13-read-data | + kubectl create configmap -n kuttl-test-relative-reindeer 09-read-data-minio-backup-source-2 --from-literal=data=100500
logger.go:42: 19:21:27 | demand-backup/13-read-data | configmap/09-read-data-minio-backup-source-2 created
logger.go:42: 19:21:28 | demand-backup/13-read-data | test step completed 13-read-data
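Every read and delete step goes through the run_mysql helper from e2e-tests/functions. Its body is only visible here through xtrace, so the following is a close paraphrase rather than the repository's literal code; wait_pod runs under set +o xtrace, so its polling logic is an assumption:

    run_mysql() {
        local command="$1"
        local uri="$2"
        local pod=

        # Resolve the client pod by label, then make sure it is Ready
        # (the "mysql-clienttrue" lines in the log are wait_pod's output).
        client_pod=$(kubectl -n "${NAMESPACE}" get pods \
            --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}')
        wait_pod "${client_pod}"

        # Pipe the statement into mysql inside the client pod; strip the
        # client's password warning so callers get bare row data.
        kubectl -n "${NAMESPACE}" exec "${client_pod}" -- \
            bash -c "printf '%s\n' \"${command}\" | mysql -sN ${uri}" 2>&1 \
            | sed -e 's/mysql: //' \
            | grep -v 'Using a password on the command line interface can be insecure.'
    }

The bare "++ :" entries that follow empty query results suggest the call sites guard the capture with "|| :" so an empty table does not trip errexit.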
demand-backup/15-delete-data | +++ GIT_BRANCH=PR-893 logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ export VERSION=PR-893-8b3e0608 logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ VERSION=PR-893-8b3e0608 logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608 logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608 logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 19:21:39 | demand-backup/15-delete-data | ++++ which gdate logger.go:42: 19:21:39 | demand-backup/15-delete-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-893/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 19:21:39 | demand-backup/15-delete-data | ++++ which date logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ date=/usr/bin/date logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ oc get projects logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ : logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ kubectl get nodes logger.go:42: 19:21:39 | demand-backup/15-delete-data 
| +++ grep '^minikube' logger.go:42: 19:21:39 | demand-backup/15-delete-data | ++ oc get projects logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ get_cluster_name logger.go:42: 19:21:39 | demand-backup/15-delete-data | +++ kubectl -n kuttl-test-relative-reindeer get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 19:21:40 | demand-backup/15-delete-data | ++ get_haproxy_svc demand-backup logger.go:42: 19:21:40 | demand-backup/15-delete-data | ++ local cluster=demand-backup logger.go:42: 19:21:40 | demand-backup/15-delete-data | ++ echo demand-backup-haproxy logger.go:42: 19:21:40 | demand-backup/15-delete-data | + run_mysql 'TRUNCATE TABLE myDB.myTable' '-h demand-backup-haproxy -uroot -proot_password' logger.go:42: 19:21:40 | demand-backup/15-delete-data | + local 'command=TRUNCATE TABLE myDB.myTable' logger.go:42: 19:21:40 | demand-backup/15-delete-data | + local 'uri=-h demand-backup-haproxy -uroot -proot_password' logger.go:42: 19:21:40 | demand-backup/15-delete-data | + local pod= logger.go:42: 19:21:40 | demand-backup/15-delete-data | ++ get_client_pod logger.go:42: 19:21:40 | demand-backup/15-delete-data | ++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:21:40 | demand-backup/15-delete-data | + client_pod=mysql-client logger.go:42: 19:21:40 | demand-backup/15-delete-data | + wait_pod mysql-client logger.go:42: 19:21:40 | demand-backup/15-delete-data | + local pod=mysql-client logger.go:42: 19:21:40 | demand-backup/15-delete-data | + set +o xtrace logger.go:42: 19:21:40 | demand-backup/15-delete-data | mysql-clienttrue logger.go:42: 19:21:40 | demand-backup/15-delete-data | + kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "TRUNCATE TABLE myDB.myTable" | mysql -sN -h demand-backup-haproxy -uroot -proot_password' logger.go:42: 19:21:40 | demand-backup/15-delete-data | + sed -e 's/mysql: //' logger.go:42: 19:21:40 | demand-backup/15-delete-data | + grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 19:21:41 | demand-backup/15-delete-data | + : logger.go:42: 19:21:41 | demand-backup/15-delete-data | ++ get_cluster_name logger.go:42: 19:21:41 | demand-backup/15-delete-data | ++ kubectl -n kuttl-test-relative-reindeer get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 19:21:42 | demand-backup/15-delete-data | + cluster_name=demand-backup logger.go:42: 19:21:42 | demand-backup/15-delete-data | + for i in 0 1 2 logger.go:42: 19:21:42 | demand-backup/15-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-0.demand-backup-mysql -uroot -proot_password' logger.go:42: 19:21:42 | demand-backup/15-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 19:21:42 | demand-backup/15-delete-data | ++ local 'uri=-h demand-backup-mysql-0.demand-backup-mysql -uroot -proot_password' logger.go:42: 19:21:42 | demand-backup/15-delete-data | ++ local pod= logger.go:42: 19:21:42 | demand-backup/15-delete-data | +++ get_client_pod logger.go:42: 19:21:42 | demand-backup/15-delete-data | +++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:21:42 | demand-backup/15-delete-data | ++ client_pod=mysql-client logger.go:42: 19:21:42 | demand-backup/15-delete-data | ++ wait_pod mysql-client logger.go:42: 19:21:42 | demand-backup/15-delete-data | ++ local pod=mysql-client logger.go:42: 19:21:42 | demand-backup/15-delete-data | ++ set +o xtrace logger.go:42: 19:21:43 | demand-backup/15-delete-data | mysql-clienttrue logger.go:42: 19:21:43 | demand-backup/15-delete-data | ++ kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-0.demand-backup-mysql -uroot -proot_password' logger.go:42: 19:21:43 | demand-backup/15-delete-data | ++ sed -e 's/mysql: //' logger.go:42: 19:21:43 | demand-backup/15-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 19:21:43 | demand-backup/15-delete-data | ++ : logger.go:42: 19:21:43 | demand-backup/15-delete-data | + data= logger.go:42: 19:21:43 | demand-backup/15-delete-data | + kubectl create configmap -n kuttl-test-relative-reindeer 08-delete-data-s3-0 --from-literal=data= logger.go:42: 19:21:44 | demand-backup/15-delete-data | configmap/08-delete-data-s3-0 created logger.go:42: 19:21:44 | demand-backup/15-delete-data | + for i in 0 1 2 logger.go:42: 19:21:44 | demand-backup/15-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-1.demand-backup-mysql -uroot -proot_password' logger.go:42: 19:21:44 | demand-backup/15-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 19:21:44 | demand-backup/15-delete-data | ++ local 'uri=-h demand-backup-mysql-1.demand-backup-mysql -uroot -proot_password' logger.go:42: 19:21:44 | demand-backup/15-delete-data | ++ local pod= logger.go:42: 19:21:44 | demand-backup/15-delete-data | +++ get_client_pod logger.go:42: 19:21:44 | demand-backup/15-delete-data | +++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:21:44 | demand-backup/15-delete-data | ++ client_pod=mysql-client logger.go:42: 19:21:44 | demand-backup/15-delete-data | ++ wait_pod mysql-client logger.go:42: 19:21:44 | demand-backup/15-delete-data | ++ local pod=mysql-client logger.go:42: 19:21:44 | demand-backup/15-delete-data | ++ set +o xtrace logger.go:42: 19:21:45 | demand-backup/15-delete-data | mysql-clienttrue logger.go:42: 19:21:45 | demand-backup/15-delete-data | ++ kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-1.demand-backup-mysql -uroot -proot_password' logger.go:42: 19:21:45 | demand-backup/15-delete-data | ++ sed -e 's/mysql: //' logger.go:42: 19:21:45 | demand-backup/15-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 19:21:45 | demand-backup/15-delete-data | ++ : logger.go:42: 19:21:45 | demand-backup/15-delete-data | + data= logger.go:42: 19:21:45 | demand-backup/15-delete-data | + kubectl create configmap -n kuttl-test-relative-reindeer 08-delete-data-s3-1 --from-literal=data= logger.go:42: 19:21:46 | demand-backup/15-delete-data | configmap/08-delete-data-s3-1 created logger.go:42: 19:21:46 | demand-backup/15-delete-data | + for i in 0 1 2 logger.go:42: 19:21:46 | demand-backup/15-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-2.demand-backup-mysql -uroot -proot_password' logger.go:42: 19:21:46 | demand-backup/15-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 19:21:46 | demand-backup/15-delete-data | ++ local 'uri=-h demand-backup-mysql-2.demand-backup-mysql -uroot -proot_password' logger.go:42: 19:21:46 | demand-backup/15-delete-data | ++ local pod= logger.go:42: 19:21:46 | demand-backup/15-delete-data | +++ get_client_pod logger.go:42: 19:21:46 | demand-backup/15-delete-data | +++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:21:46 | demand-backup/15-delete-data | ++ client_pod=mysql-client logger.go:42: 19:21:46 | demand-backup/15-delete-data | ++ wait_pod mysql-client logger.go:42: 19:21:46 | demand-backup/15-delete-data | ++ local pod=mysql-client logger.go:42: 19:21:46 | demand-backup/15-delete-data | ++ set +o xtrace logger.go:42: 19:21:47 | demand-backup/15-delete-data | mysql-clienttrue logger.go:42: 19:21:47 | demand-backup/15-delete-data | ++ kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-2.demand-backup-mysql -uroot -proot_password' logger.go:42: 19:21:47 | demand-backup/15-delete-data | ++ sed -e 's/mysql: //' logger.go:42: 19:21:47 | demand-backup/15-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 19:21:47 | demand-backup/15-delete-data | ++ : logger.go:42: 19:21:47 | demand-backup/15-delete-data | + data= logger.go:42: 19:21:47 | demand-backup/15-delete-data | + kubectl create configmap -n kuttl-test-relative-reindeer 08-delete-data-s3-2 --from-literal=data= logger.go:42: 19:21:48 | demand-backup/15-delete-data | configmap/08-delete-data-s3-2 created logger.go:42: 19:21:49 | demand-backup/15-delete-data | test step completed 15-delete-data logger.go:42: 19:21:49 | demand-backup/16-restore-from-s3 | starting test step 16-restore-from-s3 logger.go:42: 19:21:49 | demand-backup/16-restore-from-s3 | PerconaServerMySQLRestore:kuttl-test-relative-reindeer/demand-backup-restore-s3 created logger.go:42: 19:26:59 | demand-backup/16-restore-from-s3 | test step completed 16-restore-from-s3 logger.go:42: 19:26:59 | demand-backup/17-read-data | starting test step 17-read-data logger.go:42: 19:26:59 | demand-backup/17-read-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions cluster_name=$(get_cluster_name) for i in 0 1 2; do data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql -uroot -proot_password") kubectl create configmap -n "${NAMESPACE}" 06-read-data-s3-${i} --from-literal=data="${data}" done] logger.go:42: 19:26:59 | demand-backup/17-read-data | + source ../../functions logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ realpath ../../.. 
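Step 16 restores by the declarative route: a PerconaServerMySQLRestore that names an existing PerconaServerMySQLBackup instead of spelling out a backupSource as step 12 did. The kuttl step file itself is applied directly and never echoed into the log, so the following is only a sketch of the minimal shape such an object would take (apiVersion again assumed from the ps.percona.com group seen above):

    kubectl apply -n kuttl-test-relative-reindeer -f - <<'EOF'
    apiVersion: ps.percona.com/v1alpha1   # assumed
    kind: PerconaServerMySQLRestore
    metadata:
      name: demand-backup-restore-s3
    spec:
      clusterName: demand-backup
      backupName: demand-backup-s3       # the backup created in step 14
    EOF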
logger.go:42: 19:26:59 | demand-backup/17-read-data | starting test step 17-read-data
logger.go:42: 19:26:59 | demand-backup/17-read-data | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
cluster_name=$(get_cluster_name)
for i in 0 1 2; do
  data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${cluster_name}-mysql-${i}.${cluster_name}-mysql -uroot -proot_password")
  kubectl create configmap -n "${NAMESPACE}" 06-read-data-s3-${i} --from-literal=data="${data}"
done]
logger.go:42: 19:26:59 | demand-backup/17-read-data | + source ../../functions
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ realpath ../../..
logger.go:42: 19:26:59 | demand-backup/17-read-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893
logger.go:42: 19:26:59 | demand-backup/17-read-data | ++++ pwd
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/tests/demand-backup
logger.go:42: 19:26:59 | demand-backup/17-read-data | ++ test_name=demand-backup
logger.go:42: 19:26:59 | demand-backup/17-read-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/vars.sh
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-893
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/deploy
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-893/e2e-tests/conf
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup
logger.go:42: 19:26:59 | demand-backup/17-read-data | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ export GIT_BRANCH=PR-893
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ GIT_BRANCH=PR-893
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ export VERSION=PR-893-8b3e0608
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ VERSION=PR-893-8b3e0608
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ export CERT_MANAGER_VER=1.16.3
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ CERT_MANAGER_VER=1.16.3
logger.go:42: 19:26:59 | demand-backup/17-read-data | ++++ which gdate
logger.go:42: 19:26:59 | demand-backup/17-read-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-893/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 19:26:59 | demand-backup/17-read-data | ++++ which date
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ date=/usr/bin/date
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ oc get projects
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ :
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ kubectl get nodes
logger.go:42: 19:26:59 | demand-backup/17-read-data | +++ grep '^minikube'
logger.go:42: 19:27:00 | demand-backup/17-read-data | ++ oc get projects
logger.go:42: 19:27:00 | demand-backup/17-read-data | ++ get_cluster_name
logger.go:42: 19:27:00 | demand-backup/17-read-data | ++ kubectl -n kuttl-test-relative-reindeer get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 19:27:00 | demand-backup/17-read-data | + cluster_name=demand-backup
logger.go:42: 19:27:00 | demand-backup/17-read-data | + for i in 0 1 2
logger.go:42: 19:27:00 | demand-backup/17-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-0.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:27:00 | demand-backup/17-read-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 19:27:00 | demand-backup/17-read-data | ++ local 'uri=-h demand-backup-mysql-0.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:27:00 | demand-backup/17-read-data | ++ local pod=
logger.go:42: 19:27:00 | demand-backup/17-read-data | +++ get_client_pod
logger.go:42: 19:27:00 | demand-backup/17-read-data | +++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:27:01 | demand-backup/17-read-data | ++ client_pod=mysql-client
logger.go:42: 19:27:01 | demand-backup/17-read-data | ++ wait_pod mysql-client
logger.go:42: 19:27:01 | demand-backup/17-read-data | ++ local pod=mysql-client
logger.go:42: 19:27:01 | demand-backup/17-read-data | ++ set +o xtrace
logger.go:42: 19:27:01 | demand-backup/17-read-data | mysql-clienttrue
logger.go:42: 19:27:01 | demand-backup/17-read-data | ++ kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-0.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:27:01 | demand-backup/17-read-data | ++ sed -e 's/mysql: //'
logger.go:42: 19:27:01 | demand-backup/17-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:27:02 | demand-backup/17-read-data | + data=100500
logger.go:42: 19:27:02 | demand-backup/17-read-data | + kubectl create configmap -n kuttl-test-relative-reindeer 06-read-data-s3-0 --from-literal=data=100500
logger.go:42: 19:27:02 | demand-backup/17-read-data | configmap/06-read-data-s3-0 created
logger.go:42: 19:27:02 | demand-backup/17-read-data | + for i in 0 1 2
logger.go:42: 19:27:02 | demand-backup/17-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-1.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:27:02 | demand-backup/17-read-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 19:27:02 | demand-backup/17-read-data | ++ local 'uri=-h demand-backup-mysql-1.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:27:02 | demand-backup/17-read-data | ++ local pod=
logger.go:42: 19:27:02 | demand-backup/17-read-data | +++ get_client_pod
logger.go:42: 19:27:02 | demand-backup/17-read-data | +++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:27:03 | demand-backup/17-read-data | ++ client_pod=mysql-client
logger.go:42: 19:27:03 | demand-backup/17-read-data | ++ wait_pod mysql-client
logger.go:42: 19:27:03 | demand-backup/17-read-data | ++ local pod=mysql-client
logger.go:42: 19:27:03 | demand-backup/17-read-data | ++ set +o xtrace
logger.go:42: 19:27:03 | demand-backup/17-read-data | mysql-clienttrue
logger.go:42: 19:27:03 | demand-backup/17-read-data | ++ sed -e 's/mysql: //'
logger.go:42: 19:27:03 | demand-backup/17-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:27:03 | demand-backup/17-read-data | ++ kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-1.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:27:04 | demand-backup/17-read-data | + data=100500
logger.go:42: 19:27:04 | demand-backup/17-read-data | + kubectl create configmap -n kuttl-test-relative-reindeer 06-read-data-s3-1 --from-literal=data=100500
logger.go:42: 19:27:04 | demand-backup/17-read-data | configmap/06-read-data-s3-1 created
logger.go:42: 19:27:04 | demand-backup/17-read-data | + for i in 0 1 2
logger.go:42: 19:27:04 | demand-backup/17-read-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-2.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:27:04 | demand-backup/17-read-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 19:27:04 | demand-backup/17-read-data | ++ local 'uri=-h demand-backup-mysql-2.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:27:04 | demand-backup/17-read-data | ++ local pod=
logger.go:42: 19:27:04 | demand-backup/17-read-data | +++ get_client_pod
logger.go:42: 19:27:04 | demand-backup/17-read-data | +++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:27:05 | demand-backup/17-read-data | ++ client_pod=mysql-client
logger.go:42: 19:27:05 | demand-backup/17-read-data | ++ wait_pod mysql-client
logger.go:42: 19:27:05 | demand-backup/17-read-data | ++ local pod=mysql-client
logger.go:42: 19:27:05 | demand-backup/17-read-data | ++ set +o xtrace
logger.go:42: 19:27:05 | demand-backup/17-read-data | mysql-clienttrue
logger.go:42: 19:27:05 | demand-backup/17-read-data | ++ kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-2.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:27:05 | demand-backup/17-read-data | ++ sed -e 's/mysql: //'
logger.go:42: 19:27:05 | demand-backup/17-read-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:27:06 | demand-backup/17-read-data | + data=100500
logger.go:42: 19:27:06 | demand-backup/17-read-data | + kubectl create configmap -n kuttl-test-relative-reindeer 06-read-data-s3-2 --from-literal=data=100500
logger.go:42: 19:27:06 | demand-backup/17-read-data | configmap/06-read-data-s3-2 created
logger.go:42: 19:27:07 | demand-backup/17-read-data | test step completed 17-read-data
logger.go:42: 19:27:07 | demand-backup/18-create-backup-gcp | starting test step 18-create-backup-gcp
logger.go:42: 19:27:08 | demand-backup/18-create-backup-gcp | PerconaServerMySQLBackup:kuttl-test-relative-reindeer/demand-backup-gcp created
logger.go:42: 19:27:18 | demand-backup/18-create-backup-gcp | test step completed 18-create-backup-gcp
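Steps 14 and 18 trigger on-demand backups by creating PerconaServerMySQLBackup objects (demand-backup-s3 and demand-backup-gcp). The manifests are applied by kuttl and not echoed into the log, so the sketch below only illustrates the expected minimal shape; storageName is hypothetical and would have to match an entry under spec.backup.storages in the cluster CR:

    kubectl apply -n kuttl-test-relative-reindeer -f - <<'EOF'
    apiVersion: ps.percona.com/v1alpha1   # assumed
    kind: PerconaServerMySQLBackup
    metadata:
      name: demand-backup-gcp
    spec:
      clusterName: demand-backup
      storageName: gcp-cs                # hypothetical; must exist in the cluster spec
    EOF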
+++ GIT_BRANCH=PR-893 logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ export VERSION=PR-893-8b3e0608 logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ VERSION=PR-893-8b3e0608 logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608 logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-893-8b3e0608 logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 19:27:18 | demand-backup/19-delete-data | ++++ which gdate logger.go:42: 19:27:18 | demand-backup/19-delete-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-893/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 19:27:18 | demand-backup/19-delete-data | ++++ which date logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ date=/usr/bin/date logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ oc get projects logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ : logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ kubectl get nodes logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ grep '^minikube' 
logger.go:42: 19:27:18 | demand-backup/19-delete-data | ++ oc get projects logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ get_cluster_name logger.go:42: 19:27:18 | demand-backup/19-delete-data | +++ kubectl -n kuttl-test-relative-reindeer get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 19:27:19 | demand-backup/19-delete-data | ++ get_haproxy_svc demand-backup logger.go:42: 19:27:19 | demand-backup/19-delete-data | ++ local cluster=demand-backup logger.go:42: 19:27:19 | demand-backup/19-delete-data | ++ echo demand-backup-haproxy logger.go:42: 19:27:19 | demand-backup/19-delete-data | + run_mysql 'TRUNCATE TABLE myDB.myTable' '-h demand-backup-haproxy -uroot -proot_password' logger.go:42: 19:27:19 | demand-backup/19-delete-data | + local 'command=TRUNCATE TABLE myDB.myTable' logger.go:42: 19:27:19 | demand-backup/19-delete-data | + local 'uri=-h demand-backup-haproxy -uroot -proot_password' logger.go:42: 19:27:19 | demand-backup/19-delete-data | + local pod= logger.go:42: 19:27:19 | demand-backup/19-delete-data | ++ get_client_pod logger.go:42: 19:27:19 | demand-backup/19-delete-data | ++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:27:19 | demand-backup/19-delete-data | + client_pod=mysql-client logger.go:42: 19:27:19 | demand-backup/19-delete-data | + wait_pod mysql-client logger.go:42: 19:27:19 | demand-backup/19-delete-data | + local pod=mysql-client logger.go:42: 19:27:19 | demand-backup/19-delete-data | + set +o xtrace logger.go:42: 19:27:20 | demand-backup/19-delete-data | mysql-clienttrue logger.go:42: 19:27:20 | demand-backup/19-delete-data | + kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "TRUNCATE TABLE myDB.myTable" | mysql -sN -h demand-backup-haproxy -uroot -proot_password' logger.go:42: 19:27:20 | demand-backup/19-delete-data | + sed -e 's/mysql: //' logger.go:42: 19:27:20 | demand-backup/19-delete-data | + grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 19:27:25 | demand-backup/19-delete-data | + : logger.go:42: 19:27:25 | demand-backup/19-delete-data | ++ get_cluster_name logger.go:42: 19:27:25 | demand-backup/19-delete-data | ++ kubectl -n kuttl-test-relative-reindeer get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 19:27:26 | demand-backup/19-delete-data | + cluster_name=demand-backup logger.go:42: 19:27:26 | demand-backup/19-delete-data | + for i in 0 1 2 logger.go:42: 19:27:26 | demand-backup/19-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-0.demand-backup-mysql -uroot -proot_password' logger.go:42: 19:27:26 | demand-backup/19-delete-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 19:27:26 | demand-backup/19-delete-data | ++ local 'uri=-h demand-backup-mysql-0.demand-backup-mysql -uroot -proot_password' logger.go:42: 19:27:26 | demand-backup/19-delete-data | ++ local pod= logger.go:42: 19:27:26 | demand-backup/19-delete-data | +++ get_client_pod logger.go:42: 19:27:26 | demand-backup/19-delete-data | +++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 19:27:26 | demand-backup/19-delete-data | ++ client_pod=mysql-client logger.go:42: 19:27:26 | demand-backup/19-delete-data | ++ wait_pod mysql-client logger.go:42: 19:27:26 | demand-backup/19-delete-data | ++ local pod=mysql-client logger.go:42: 19:27:26 | demand-backup/19-delete-data | ++ set +o xtrace logger.go:42: 19:27:27 | demand-backup/19-delete-data | mysql-clienttrue logger.go:42: 19:27:27 | demand-backup/19-delete-data | ++ kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-0.demand-backup-mysql -uroot -proot_password' logger.go:42: 19:27:27 | demand-backup/19-delete-data | ++ sed -e 's/mysql: //' logger.go:42: 19:27:27 | demand-backup/19-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 19:29:39 | demand-backup/19-delete-data | + data='ERROR 2003 (HY000): Can'\''t connect to MySQL server on '\''demand-backup-mysql-0.demand-backup-mysql:3306'\'' (110)
logger.go:42: 19:29:39 | demand-backup/19-delete-data | command terminated with exit code 1'
logger.go:42: 19:29:39 | demand-backup/19-delete-data | + kubectl create configmap -n kuttl-test-relative-reindeer 12-delete-data-gcp-0 '--from-literal=data=ERROR 2003 (HY000): Can'\''t connect to MySQL server on '\''demand-backup-mysql-0.demand-backup-mysql:3306'\'' (110)
logger.go:42: 19:29:39 | demand-backup/19-delete-data | command terminated with exit code 1'
logger.go:42: 19:29:39 | demand-backup/19-delete-data | configmap/12-delete-data-gcp-0 created
logger.go:42: 19:29:39 | demand-backup/19-delete-data | + for i in 0 1 2
logger.go:42: 19:29:39 | demand-backup/19-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-1.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:29:39 | demand-backup/19-delete-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 19:29:39 | demand-backup/19-delete-data | ++ local 'uri=-h demand-backup-mysql-1.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:29:39 | demand-backup/19-delete-data | ++ local pod=
logger.go:42: 19:29:39 | demand-backup/19-delete-data | +++ get_client_pod
logger.go:42: 19:29:39 | demand-backup/19-delete-data | +++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:29:40 | demand-backup/19-delete-data | ++ client_pod=mysql-client
logger.go:42: 19:29:40 | demand-backup/19-delete-data | ++ wait_pod mysql-client
logger.go:42: 19:29:40 | demand-backup/19-delete-data | ++ local pod=mysql-client
logger.go:42: 19:29:40 | demand-backup/19-delete-data | ++ set +o xtrace
logger.go:42: 19:29:40 | demand-backup/19-delete-data | mysql-clienttrue
logger.go:42: 19:29:40 | demand-backup/19-delete-data | ++ sed -e 's/mysql: //'
logger.go:42: 19:29:40 | demand-backup/19-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:29:40 | demand-backup/19-delete-data | ++ kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-1.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:29:41 | demand-backup/19-delete-data | + data=100500
logger.go:42: 19:29:41 | demand-backup/19-delete-data | + kubectl create configmap -n kuttl-test-relative-reindeer 12-delete-data-gcp-1 --from-literal=data=100500
logger.go:42: 19:29:41 | demand-backup/19-delete-data | configmap/12-delete-data-gcp-1 created
logger.go:42: 19:29:41 | demand-backup/19-delete-data | + for i in 0 1 2
logger.go:42: 19:29:41 | demand-backup/19-delete-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h demand-backup-mysql-2.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:29:41 | demand-backup/19-delete-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 19:29:41 | demand-backup/19-delete-data | ++ local 'uri=-h demand-backup-mysql-2.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:29:41 | demand-backup/19-delete-data | ++ local pod=
logger.go:42: 19:29:41 | demand-backup/19-delete-data | +++ get_client_pod
logger.go:42: 19:29:41 | demand-backup/19-delete-data | +++ kubectl -n kuttl-test-relative-reindeer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 19:29:41 | demand-backup/19-delete-data | ++ client_pod=mysql-client
logger.go:42: 19:29:41 | demand-backup/19-delete-data | ++ wait_pod mysql-client
logger.go:42: 19:29:41 | demand-backup/19-delete-data | ++ local pod=mysql-client
logger.go:42: 19:29:41 | demand-backup/19-delete-data | ++ set +o xtrace
logger.go:42: 19:29:42 | demand-backup/19-delete-data | mysql-clienttrue
logger.go:42: 19:29:42 | demand-backup/19-delete-data | ++ kubectl -n kuttl-test-relative-reindeer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h demand-backup-mysql-2.demand-backup-mysql -uroot -proot_password'
logger.go:42: 19:29:42 | demand-backup/19-delete-data | ++ sed -e 's/mysql: //'
logger.go:42: 19:29:42 | demand-backup/19-delete-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 19:29:43 | demand-backup/19-delete-data | + data=100500
logger.go:42: 19:29:43 | demand-backup/19-delete-data | + kubectl create configmap -n kuttl-test-relative-reindeer 12-delete-data-gcp-2 --from-literal=data=100500
logger.go:42: 19:29:43 | demand-backup/19-delete-data | configmap/12-delete-data-gcp-2 created
logger.go:42: 19:30:14 | demand-backup/19-delete-data | test step failed 19-delete-data
case.go:378: failed in step 19-delete-data
case.go:380: --- ConfigMap:kuttl-test-relative-reindeer/12-delete-data-gcp-0
+++ ConfigMap:kuttl-test-relative-reindeer/12-delete-data-gcp-0
@@ -1,8 +1,11 @@
 apiVersion: v1
 data:
-  data: ""
+  data: |-
+    ERROR 2003 (HY000): Can't connect to MySQL server on 'demand-backup-mysql-0.demand-backup-mysql:3306' (110)
+    command terminated with exit code 1
 kind: ConfigMap
 metadata:
+  managedFields: '[... elided field over 10 lines long ...]'
   name: 12-delete-data-gcp-0
   namespace: kuttl-test-relative-reindeer
case.go:380: resource ConfigMap:kuttl-test-relative-reindeer/12-delete-data-gcp-0: .data.data: value mismatch, expected: != actual: ERROR 2003 (HY000): Can't connect to MySQL server on 'demand-backup-mysql-0.demand-backup-mysql:3306' (110)
command terminated with exit code 1
case.go:380: --- ConfigMap:kuttl-test-relative-reindeer/12-delete-data-gcp-1
+++ ConfigMap:kuttl-test-relative-reindeer/12-delete-data-gcp-1
@@ -1,8 +1,9 @@
 apiVersion: v1
 data:
-  data: ""
+  data: "100500"
 kind: ConfigMap
 metadata:
+  managedFields: '[... elided field over 10 lines long ...]'
   name: 12-delete-data-gcp-1
   namespace: kuttl-test-relative-reindeer
case.go:380: resource ConfigMap:kuttl-test-relative-reindeer/12-delete-data-gcp-1: .data.data: value mismatch, expected: != actual: 100500
case.go:380: --- ConfigMap:kuttl-test-relative-reindeer/12-delete-data-gcp-2
+++ ConfigMap:kuttl-test-relative-reindeer/12-delete-data-gcp-2
@@ -1,8 +1,9 @@
 apiVersion: v1
 data:
-  data: ""
+  data: "100500"
 kind: ConfigMap
 metadata:
+  managedFields: '[... elided field over 10 lines long ...]'
   name: 12-delete-data-gcp-2
   namespace: kuttl-test-relative-reindeer
case.go:380: resource ConfigMap:kuttl-test-relative-reindeer/12-delete-data-gcp-2: .data.data: value mismatch, expected: != actual: 100500
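What the diffs above mean: the step's assert expects each 12-delete-data-gcp-<i> ConfigMap to carry an empty data value, i.e. SELECT returns nothing after the TRUNCATE. Instead, mysql-0 was unreachable (ERROR 2003; errno 110 is a TCP connect timeout) and mysql-1/mysql-2 still return 100500, so the row was never removed from their copies. A hypothetical spot-check, outside the test itself, to re-read what the step recorded:

    # Hypothetical follow-up (not part of the kuttl step): re-read the recorded
    # values; the assert expected every one of these to print an empty string.
    for i in 0 1 2; do
        printf '12-delete-data-gcp-%s: ' "$i"
        kubectl -n kuttl-test-relative-reindeer get configmap "12-delete-data-gcp-$i" \
            -o 'jsonpath={.data.data}'
        echo
    done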
logger.go:42: 19:30:14 | demand-backup | demand-backup events from ns kuttl-test-relative-reindeer:
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:00:39 +0000 UTC Normal Pod mysql-client Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/mysql-client to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:00:40 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulling Pulling image "percona/percona-server:8.0.33" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:00:46 +0000 UTC Normal PersistentVolumeClaim minio-service WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:00:46 +0000 UTC Normal Deployment.apps minio-service ScalingReplicaSet Scaled up replica set minio-service-8967c7f7f to 1 deployment-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:00:47 +0000 UTC Normal ReplicaSet.apps minio-service-8967c7f7f SuccessfulCreate Created pod: minio-service-8967c7f7f-4dvnh replicaset-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:00:47 +0000 UTC Normal Pod minio-service-post-job-4d28t Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/minio-service-post-job-4d28t to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:00:47 +0000 UTC Normal Job.batch minio-service-post-job SuccessfulCreate Created pod: minio-service-post-job-4d28t job-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:00:47 +0000 UTC Normal PersistentVolumeClaim minio-service ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:00:47 +0000 UTC Normal PersistentVolumeClaim minio-service Provisioning External provisioner is provisioning volume for claim "kuttl-test-relative-reindeer/minio-service" pd.csi.storage.gke.io_gke-9c5ae81cf76f44419b38-49e6-1337-vm_9d8aa135-b228-4c90-852c-43b99e02a641
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:00:48 +0000 UTC Normal Pod minio-service-post-job-4d28t.spec.containers{minio-make-user} Pulling Pulling image "quay.io/minio/mc:RELEASE.2023-09-29T16-41-22Z" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:00:50 +0000 UTC Normal PersistentVolumeClaim minio-service ProvisioningSucceeded Successfully provisioned volume pvc-c89f314c-e608-40ac-b521-ca7dce9691fd pd.csi.storage.gke.io_gke-9c5ae81cf76f44419b38-49e6-1337-vm_9d8aa135-b228-4c90-852c-43b99e02a641
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:00:51 +0000 UTC Normal Pod minio-service-8967c7f7f-4dvnh Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/minio-service-8967c7f7f-4dvnh to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:00:59 +0000 UTC Normal Pod minio-service-8967c7f7f-4dvnh SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-c89f314c-e608-40ac-b521-ca7dce9691fd" attachdetach-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:00 +0000 UTC Normal Pod minio-service-8967c7f7f-4dvnh.spec.containers{minio} Pulling Pulling image "quay.io/minio/minio:RELEASE.2023-09-30T07-02-29Z" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:00 +0000 UTC Normal Pod minio-service-post-job-4d28t.spec.containers{minio-make-user} Pulled Successfully pulled image "quay.io/minio/mc:RELEASE.2023-09-29T16-41-22Z" in 11.74s (11.839s including waiting). Image size: 68032340 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:00 +0000 UTC Normal Pod minio-service-post-job-4d28t.spec.containers{minio-make-user} Created Created container: minio-make-user kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:00 +0000 UTC Normal Pod minio-service-post-job-4d28t.spec.containers{minio-make-user} Started Started container minio-make-user kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:06 +0000 UTC Normal Pod minio-service-8967c7f7f-4dvnh.spec.containers{minio} Pulled Successfully pulled image "quay.io/minio/minio:RELEASE.2023-09-30T07-02-29Z" in 5.14s (5.14s including waiting). Image size: 104927019 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:06 +0000 UTC Normal Pod minio-service-8967c7f7f-4dvnh.spec.containers{minio} Created Created container: minio kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:06 +0000 UTC Normal Pod minio-service-8967c7f7f-4dvnh.spec.containers{minio} Started Started container minio kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:10 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Successfully pulled image "percona/percona-server:8.0.33" in 30.461s (30.461s including waiting). Image size: 387098058 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:10 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container: mysql-client kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:11 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:19 +0000 UTC Normal Job.batch minio-service-post-job Completed Job completed job-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:21 +0000 UTC Normal Pod aws-cli Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/aws-cli to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:21 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Pulling Pulling image "perconalab/awscli" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:24 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Pulled Successfully pulled image "perconalab/awscli" in 3.292s (3.292s including waiting). Image size: 30314917 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:24 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Created Created container: aws-cli kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:24 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Started Started container aws-cli kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:31 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:31 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:31 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-relative-reindeer/datadir-demand-backup-mysql-0" pd.csi.storage.gke.io_gke-9c5ae81cf76f44419b38-49e6-1337-vm_9d8aa135-b228-4c90-852c-43b99e02a641
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:31 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulCreate create Claim datadir-demand-backup-mysql-0 Pod demand-backup-mysql-0 in StatefulSet demand-backup-mysql success statefulset-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:31 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulCreate create Pod demand-backup-mysql-0 in StatefulSet demand-backup-mysql successful statefulset-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:31 +0000 UTC Normal Pod demand-backup-orc-0 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-orc-0 to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:31 +0000 UTC Normal StatefulSet.apps demand-backup-orc SuccessfulCreate create Pod demand-backup-orc-0 in StatefulSet demand-backup-orc successful statefulset-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:32 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:32 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 251ms (251ms including waiting). Image size: 107824963 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:32 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orc-init} Created Created container: orc-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:32 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orc-init} Started Started container orc-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:34 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:35 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-53e287cd-b8c5-4e71-9d11-b09e92eb6dbb pd.csi.storage.gke.io_gke-9c5ae81cf76f44419b38-49e6-1337-vm_9d8aa135-b228-4c90-852c-43b99e02a641
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:35 +0000 UTC Normal Pod demand-backup-mysql-0 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-mysql-0 to gke-jen-ps-893-8b3e0608--default-pool-81577378-8f5w default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:36 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 1.976s (1.976s including waiting). Image size: 72305256 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:36 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Created Created container: orc kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:36 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Started Started container orc kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:36 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:36 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 225ms (225ms including waiting). Image size: 72305256 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:36 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:36 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:42 +0000 UTC Normal Pod demand-backup-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-53e287cd-b8c5-4e71-9d11-b09e92eb6dbb" attachdetach-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:44 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:47 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 221ms (221ms including waiting). Image size: 72305256 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:49 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 4.718s (4.718s including waiting). Image size: 107824963 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:49 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:49 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:01:53 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:07 +0000 UTC Normal Pod demand-backup-orc-1 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-orc-1 to gke-jen-ps-893-8b3e0608--default-pool-81577378-8f5w default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:07 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:07 +0000 UTC Normal StatefulSet.apps demand-backup-orc SuccessfulCreate create Pod demand-backup-orc-1 in StatefulSet demand-backup-orc successful statefulset-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:08 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 277ms (277ms including waiting). Image size: 107824963 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:08 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Created Created container: orc-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:08 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Started Started container orc-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:18 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 24.986s (24.986s including waiting). Image size: 383656261 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:18 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:18 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:18 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:18 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:21 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 3.278s (3.278s including waiting). Image size: 72305256 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:21 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Created Created container: orc kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:21 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Started Started container orc kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:21 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:21 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 227ms (227ms including waiting). Image size: 72305256 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:21 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:21 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:39 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 172ms (172ms including waiting). Image size: 72305256 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:45 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 26.953s (26.953s including waiting). Image size: 425663155 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:45 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:45 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:45 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:49 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 4.191s (4.191s including waiting). Image size: 131819652 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:49 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:49 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:52 +0000 UTC Normal Pod demand-backup-orc-2 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-orc-2 to gke-jen-ps-893-8b3e0608--default-pool-81577378-c0gv default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:52 +0000 UTC Normal StatefulSet.apps demand-backup-orc SuccessfulCreate create Pod demand-backup-orc-2 in StatefulSet demand-backup-orc successful statefulset-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:53 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:54 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:54 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:54 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-relative-reindeer/datadir-demand-backup-mysql-1" pd.csi.storage.gke.io_gke-9c5ae81cf76f44419b38-49e6-1337-vm_9d8aa135-b228-4c90-852c-43b99e02a641
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:54 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulCreate create Claim datadir-demand-backup-mysql-1 Pod demand-backup-mysql-1 in StatefulSet demand-backup-mysql success statefulset-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:54 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulCreate create Pod demand-backup-mysql-1 in StatefulSet demand-backup-mysql successful statefulset-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:55 +0000 UTC Normal Pod demand-backup-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-haproxy-0 to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:55 +0000 UTC Normal StatefulSet.apps demand-backup-haproxy SuccessfulCreate create Pod demand-backup-haproxy-0 in StatefulSet demand-backup-haproxy successful statefulset-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:56 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:56 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 268ms (268ms including waiting). Image size: 107824963 bytes. kubelet
"perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 268ms (268ms including waiting). Image size: 107824963 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:56 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:56 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:58 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-ad357f79-5c59-4fe2-bad8-253e839259b9 pd.csi.storage.gke.io_gke-9c5ae81cf76f44419b38-49e6-1337-vm_9d8aa135-b228-4c90-852c-43b99e02a641 logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:58 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:58 +0000 UTC Normal Pod demand-backup-mysql-1 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-mysql-1 to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:58 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 5.021s (5.021s including waiting). Image size: 107824963 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:58 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:02:58 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:02 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 3.56s (3.56s including waiting). Image size: 102560607 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:02 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:02 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:02 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:02 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 230ms (230ms including waiting). Image size: 102560607 bytes. 
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:02 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:02 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:02 +0000 UTC Normal Pod demand-backup-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-haproxy-1 to gke-jen-ps-893-8b3e0608--default-pool-81577378-8f5w default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:02 +0000 UTC Normal StatefulSet.apps demand-backup-haproxy SuccessfulCreate create Pod demand-backup-haproxy-1 in StatefulSet demand-backup-haproxy successful statefulset-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:02 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:03 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:03 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 271ms (271ms including waiting). Image size: 107824963 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:03 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:03 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:04 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 2.137s (2.137s including waiting). Image size: 72305256 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:04 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Created Created container: orc kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:04 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Started Started container orc kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:04 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:04 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 226ms (226ms including waiting). Image size: 72305256 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:04 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:04 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:05 +0000 UTC Normal Pod demand-backup-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-ad357f79-5c59-4fe2-bad8-253e839259b9" attachdetach-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:07 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:07 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 251ms (251ms including waiting). Image size: 107824963 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:07 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:07 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:10 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:11 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:14 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 3.61s (3.61s including waiting). Image size: 102560607 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:14 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:15 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:15 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:15 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 215ms (215ms including waiting). Image size: 102560607 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:15 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:15 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:16 +0000 UTC Normal Pod demand-backup-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-haproxy-2 to gke-jen-ps-893-8b3e0608--default-pool-81577378-c0gv default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:16 +0000 UTC Normal StatefulSet.apps demand-backup-haproxy SuccessfulCreate create Pod demand-backup-haproxy-2 in StatefulSet demand-backup-haproxy successful statefulset-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:16 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 220ms (220ms including waiting). Image size: 72305256 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:17 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:17 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 256ms (256ms including waiting). Image size: 107824963 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:17 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:17 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:19 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:22 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 3.556s (3.556s including waiting). Image size: 102560607 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:22 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:22 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:22 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:22 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 193ms (193ms including waiting). Image size: 102560607 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:22 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:23 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:33 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 22.793s (22.793s including waiting). Image size: 383656261 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:33 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:33 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:03:33 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:04:01 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 28.362s (28.362s including waiting). Image size: 425663155 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:04:01 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:04:01 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:04:01 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:04:08 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 6.944s (6.944s including waiting). Image size: 131819652 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:04:09 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:04:09 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:04:18 +0000 UTC Warning Pod demand-backup-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/05/12 19:04:17 Waiting for MySQL ready state 2025/05/12 19:04:17 MySQL is ready 2025/05/12 19:04:17 Peers: [3733613635303134.demand-backup-mysql-unready.kuttl-test-relative-reindeer 3838303538376232.demand-backup-mysql-unready.kuttl-test-relative-reindeer] 2025/05/12 19:04:17 FQDN: demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:04:17 Primary: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer Replicas: [demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer] 2025/05/12 19:04:17 lookup demand-backup-mysql-1 [10.93.2.11] 2025/05/12 19:04:17 PodIP: 10.93.2.11 2025/05/12 19:04:17 lookup demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer [10.93.1.5] 2025/05/12 19:04:17 PrimaryIP: 10.93.1.5 2025/05/12 19:04:17 Donor: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:04:17 Opening connection to 10.93.2.11 2025/05/12 19:04:17 Clone required: true 2025/05/12 19:04:17 Checking if a clone in progress 2025/05/12 19:04:17 Clone in progress: false 2025/05/12 19:04:17 Cloning from demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:04:18 Clone finished. Restarting container... kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:04:18 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:04:25 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 218ms (218ms including waiting). Image size: 383656261 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:04:57 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:04:57 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
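The Unhealthy startup-probe events above record the operator's replica bootstrap rather than a crash: the entrypoint waits for MySQL, discovers peers, resolves the primary, clones from a donor, then restarts the container, so one failed startup probe followed by a Killing/restart per new replica is the expected pattern here. A hypothetical way to isolate these events for a single pod instead of reading the full namespace dump:

    # Hypothetical query (not run in this log): list only the events attached to
    # one pod, oldest first, to follow its clone/restart sequence in isolation.
    kubectl -n kuttl-test-relative-reindeer get events \
        --field-selector involvedObject.name=demand-backup-mysql-1 \
        --sort-by=.lastTimestamp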
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:04:57 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-relative-reindeer/datadir-demand-backup-mysql-2" pd.csi.storage.gke.io_gke-9c5ae81cf76f44419b38-49e6-1337-vm_9d8aa135-b228-4c90-852c-43b99e02a641
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:04:57 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulCreate create Claim datadir-demand-backup-mysql-2 Pod demand-backup-mysql-2 in StatefulSet demand-backup-mysql success statefulset-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:04:57 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulCreate create Pod demand-backup-mysql-2 in StatefulSet demand-backup-mysql successful statefulset-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:05:00 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-9bba503a-e576-4faa-aca7-61d616b6421a pd.csi.storage.gke.io_gke-9c5ae81cf76f44419b38-49e6-1337-vm_9d8aa135-b228-4c90-852c-43b99e02a641
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:05:01 +0000 UTC Normal Pod demand-backup-mysql-2 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-mysql-2 to gke-jen-ps-893-8b3e0608--default-pool-81577378-c0gv default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:05:08 +0000 UTC Normal Pod demand-backup-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-9bba503a-e576-4faa-aca7-61d616b6421a" attachdetach-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:05:10 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:05:10 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 289ms (289ms including waiting). Image size: 107824963 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:05:10 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:05:10 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:05:12 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:05:36 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 23.675s (23.675s including waiting). Image size: 383656261 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:05:36 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:05:36 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:05:36 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:06:07 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 30.92s (30.92s including waiting). Image size: 425663155 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:06:07 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:06:07 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:06:07 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:06:12 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 5.627s (5.627s including waiting). Image size: 131819652 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:06:12 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:06:13 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:06:21 +0000 UTC Warning Pod demand-backup-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/05/12 19:06:20 Waiting for MySQL ready state 2025/05/12 19:06:20 MySQL is ready 2025/05/12 19:06:20 Peers: [3733613635303134.demand-backup-mysql-unready.kuttl-test-relative-reindeer 3838303538376232.demand-backup-mysql-unready.kuttl-test-relative-reindeer 3965633030613065.demand-backup-mysql-unready.kuttl-test-relative-reindeer] 2025/05/12 19:06:20 FQDN: demand-backup-mysql-2.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:06:20 Primary: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer Replicas: [demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer demand-backup-mysql-2.demand-backup-mysql.kuttl-test-relative-reindeer] 2025/05/12 19:06:20 lookup demand-backup-mysql-2 [10.93.0.15] 2025/05/12 19:06:20 PodIP: 10.93.0.15 2025/05/12 19:06:20 lookup demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer [10.93.1.5] 2025/05/12 19:06:20 PrimaryIP: 10.93.1.5 2025/05/12 19:06:20 Donor: demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:06:20 Opening connection to 10.93.0.15 2025/05/12 19:06:20 Clone required: true 2025/05/12 19:06:20 Checking if a clone in progress 2025/05/12 19:06:20 Clone in progress: false 2025/05/12 19:06:20 Cloning from demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:06:21 Clone finished. Restarting container... kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:06:21 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:06:27 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 235ms (235ms including waiting). Image size: 383656261 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:11 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:11 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:11 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:14 +0000 UTC Warning Pod demand-backup-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/05/12 19:07:14 MySQL state is not ready... kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:19 +0000 UTC Warning Pod demand-backup-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/05/12 19:07:19 MySQL state is not ready... kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:24 +0000 UTC Warning Pod demand-backup-mysql-0.spec.containers{mysql} Unhealthy Readiness probe errored: rpc error: code = Unknown desc = failed to exec in container: container is in CONTAINER_EXITED state kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:32 +0000 UTC Normal Pod demand-backup-mysql-0 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-mysql-0 to gke-jen-ps-893-8b3e0608--default-pool-81577378-8f5w default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:38 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:38 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 262ms (262ms including waiting). Image size: 107824963 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:38 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:38 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 231ms (231ms including waiting). Image size: 383656261 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 263ms (263ms including waiting). Image size: 425663155 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:41 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 246ms (246ms including waiting). Image size: 131819652 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:41 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:41 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:59 +0000 UTC Warning Pod demand-backup-mysql-0.spec.containers{mysql} Unhealthy Startup probe failed:
    2025/05/12 19:07:57 Waiting for MySQL ready state
    2025/05/12 19:07:57 MySQL is ready
    2025/05/12 19:07:57 Peers: [3561353430623566.demand-backup-mysql-unready.kuttl-test-relative-reindeer 3733613635303134.demand-backup-mysql-unready.kuttl-test-relative-reindeer 3965633030613065.demand-backup-mysql-unready.kuttl-test-relative-reindeer]
    2025/05/12 19:07:57 FQDN: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer
    2025/05/12 19:07:58 Primary: demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer Replicas: [demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer demand-backup-mysql-2.demand-backup-mysql.kuttl-test-relative-reindeer]
    2025/05/12 19:07:58 lookup demand-backup-mysql-0 [10.93.1.8]
    2025/05/12 19:07:58 PodIP: 10.93.1.8
    2025/05/12 19:07:58 lookup demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer [10.93.2.11]
    2025/05/12 19:07:58 PrimaryIP: 10.93.2.11
    2025/05/12 19:07:58 Donor: demand-backup-mysql-2.demand-backup-mysql.kuttl-test-relative-reindeer
    2025/05/12 19:07:58 Opening connection to 10.93.1.8
    2025/05/12 19:07:58 Clone required: true
    2025/05/12 19:07:58 Checking if a clone in progress
    2025/05/12 19:07:58 Clone in progress: false
    2025/05/12 19:07:58 Cloning from demand-backup-mysql-2.demand-backup-mysql.kuttl-test-relative-reindeer
    2025/05/12 19:07:59 Clone finished. Restarting container... kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:07:59 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:08:02 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 222ms (222ms including waiting). Image size: 383656261 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:08:43 +0000 UTC Warning PerconaServerMySQL.ps.percona.com demand-backup AsyncReplicationNotReady demand-backup-mysql-2: [not_replicating] ps-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:09:31 +0000 UTC Normal Pod xb-demand-backup-minio-minio-7r48g Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/xb-demand-backup-minio-minio-7r48g to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:09:31 +0000 UTC Normal Pod xb-demand-backup-minio-minio-7r48g.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:09:31 +0000 UTC Normal Job.batch xb-demand-backup-minio-minio SuccessfulCreate Created pod: xb-demand-backup-minio-minio-7r48g job-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:09:32 +0000 UTC Normal Pod xb-demand-backup-minio-minio-7r48g.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 243ms (243ms including waiting). Image size: 107824963 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:09:32 +0000 UTC Normal Pod xb-demand-backup-minio-minio-7r48g.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:09:32 +0000 UTC Normal Pod xb-demand-backup-minio-minio-7r48g.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:09:33 +0000 UTC Normal Pod xb-demand-backup-minio-minio-7r48g.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:09:33 +0000 UTC Normal Pod xb-demand-backup-minio-minio-7r48g.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 244ms (244ms including waiting). Image size: 425663155 bytes.
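Two things happen back to back in this stretch: the ps-controller briefly flags demand-backup-mysql-2 as [not_replicating] while the freshly cloned replica catches up, and the on-demand backup then runs as a plain batch Job (xb-demand-backup-minio-minio) whose pod streams the backup to minio via xtrabackup. A hedged way to check both, reusing NS and PASS from the sketch above:

    # Replication health on the replica the warning named:
    kubectl -n "$NS" exec demand-backup-mysql-2 -c mysql -- \
      mysql -uroot -p"$PASS" -e "SHOW REPLICA STATUS\G" | grep -E 'Replica_(IO|SQL)_Running|Seconds_Behind'
    # Block until the backup Job seen above finishes:
    kubectl -n "$NS" wait job/xb-demand-backup-minio-minio --for=condition=complete --timeout=300s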
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:09:33 +0000 UTC Normal Pod xb-demand-backup-minio-minio-7r48g.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:09:33 +0000 UTC Normal Pod xb-demand-backup-minio-minio-7r48g.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:09:39 +0000 UTC Normal Job.batch xb-demand-backup-minio-minio Completed Job completed job-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:17 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:17 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:17 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:17 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulDelete delete Pod demand-backup-mysql-2 in StatefulSet demand-backup-mysql successful statefulset-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:18 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:18 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:18 +0000 UTC Normal StatefulSet.apps demand-backup-haproxy SuccessfulDelete delete Pod demand-backup-haproxy-2 in StatefulSet demand-backup-haproxy successful statefulset-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:18 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:18 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:18 +0000 UTC Normal StatefulSet.apps demand-backup-orc SuccessfulDelete delete Pod demand-backup-orc-2 in StatefulSet demand-backup-orc successful statefulset-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:19 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:19 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:19 +0000 UTC Normal StatefulSet.apps demand-backup-haproxy SuccessfulDelete delete Pod demand-backup-haproxy-1 in StatefulSet demand-backup-haproxy successful statefulset-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:20 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:20 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:20 +0000 UTC Normal StatefulSet.apps demand-backup-haproxy 
SuccessfulDelete delete Pod demand-backup-haproxy-0 in StatefulSet demand-backup-haproxy successful statefulset-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:20 +0000 UTC Warning Pod demand-backup-mysql-2.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/05/12 19:10:20 MySQL state is not ready... kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:22 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:22 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:22 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:22 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulDelete delete Pod demand-backup-mysql-1 in StatefulSet demand-backup-mysql successful statefulset-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:27 +0000 UTC Warning Pod demand-backup-mysql-1.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/05/12 19:10:27 MySQL state is not ready... kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:42 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:42 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:42 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:42 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulDelete delete Pod demand-backup-mysql-0 in StatefulSet demand-backup-mysql successful statefulset-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:48 +0000 UTC Warning Pod demand-backup-orc-2.spec.containers{orc} Unhealthy Readiness probe failed: Get "http://10.93.0.13:3000/api/health": dial tcp 10.93.0.13:3000: connect: connection refused kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:49 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:49 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:10:49 +0000 UTC Normal StatefulSet.apps demand-backup-orc SuccessfulDelete delete Pod demand-backup-orc-1 in StatefulSet demand-backup-orc successful statefulset-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:11:20 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:11:20 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:11:20 +0000 UTC Normal StatefulSet.apps demand-backup-orc SuccessfulDelete delete Pod demand-backup-orc-0 in StatefulSet demand-backup-orc successful statefulset-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:11:54 +0000 UTC 
Normal Pod xb-restore-demand-backup-restore-minio-h4cqp Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/xb-restore-demand-backup-restore-minio-h4cqp to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:11:54 +0000 UTC Normal Job.batch xb-restore-demand-backup-restore-minio SuccessfulCreate Created pod: xb-restore-demand-backup-restore-minio-h4cqp job-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:02 +0000 UTC Normal Pod xb-restore-demand-backup-restore-minio-h4cqp SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-53e287cd-b8c5-4e71-9d11-b09e92eb6dbb" attachdetach-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:03 +0000 UTC Normal Pod xb-restore-demand-backup-restore-minio-h4cqp.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:04 +0000 UTC Normal Pod xb-restore-demand-backup-restore-minio-h4cqp.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 224ms (224ms including waiting). Image size: 107824963 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:04 +0000 UTC Normal Pod xb-restore-demand-backup-restore-minio-h4cqp.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:04 +0000 UTC Normal Pod xb-restore-demand-backup-restore-minio-h4cqp.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:06 +0000 UTC Normal Pod xb-restore-demand-backup-restore-minio-h4cqp.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:06 +0000 UTC Normal Pod xb-restore-demand-backup-restore-minio-h4cqp.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 233ms (233ms including waiting). Image size: 425663155 bytes. 
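The Killing/SuccessfulDelete burst above is the operator quiescing the whole cluster (the mysql, haproxy, and orc StatefulSets are scaled down) before restoring. Like the backup, the restore is a batch Job, but it additionally attaches mysql-0's data volume (pvc-53e287cd-...) so xtrabackup can prepare the restored datadir in place. A sketch for tracking it; the perconaservermysqlrestore resource name is inferred from the ps.percona.com API group seen earlier, not printed in this log:

    NS=kuttl-test-relative-reindeer
    kubectl -n "$NS" wait job/xb-restore-demand-backup-restore-minio --for=condition=complete --timeout=600s
    kubectl -n "$NS" get perconaservermysqlrestore -o wide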
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:06 +0000 UTC Normal Pod xb-restore-demand-backup-restore-minio-h4cqp.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:06 +0000 UTC Normal Pod xb-restore-demand-backup-restore-minio-h4cqp.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:15 +0000 UTC Normal Pod demand-backup-mysql-0 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-mysql-0 to gke-jen-ps-893-8b3e0608--default-pool-81577378-8f5w default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:15 +0000 UTC Warning Pod demand-backup-mysql-0 FailedAttachVolume Multi-Attach error for volume "pvc-53e287cd-b8c5-4e71-9d11-b09e92eb6dbb" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:15 +0000 UTC Normal Pod demand-backup-orc-0 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-orc-0 to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:15 +0000 UTC Normal Job.batch xb-restore-demand-backup-restore-minio Completed Job completed job-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:16 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:16 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 260ms (260ms including waiting). Image size: 107824963 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:16 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:16 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:18 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:18 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 233ms (233ms including waiting). Image size: 72305256 bytes. 
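The Multi-Attach warning for demand-backup-mysql-0 is transient and expected with a ReadWriteOnce volume: the restore Job on node ...-887w still holds pvc-53e287cd-..., so mysql-0, scheduled onto ...-8f5w, cannot attach until the attach/detach controller releases the volume, which happens about twenty seconds later (the SuccessfulAttachVolume record below). To see who holds a volume during such a window:

    NS=kuttl-test-relative-reindeer
    kubectl -n "$NS" get events --field-selector reason=FailedAttachVolume
    kubectl get volumeattachments | grep pvc-53e287cd-b8c5-4e71-9d11-b09e92eb6dbb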
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:18 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:18 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Started Started container orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:18 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:18 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 227ms (227ms including waiting). Image size: 72305256 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:18 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:18 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:29 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 243ms (243ms including waiting). Image size: 72305256 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:36 +0000 UTC Normal Pod demand-backup-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-53e287cd-b8c5-4e71-9d11-b09e92eb6dbb" attachdetach-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:37 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:37 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 240ms (240ms including waiting). Image size: 107824963 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:37 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:38 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 238ms (238ms including waiting). Image size: 383656261 bytes. 
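Both orchestrator containers (orc and mysql-monit) run the main-orchestrator image, and the readiness check that failed during teardown earlier (Get "http://10.93.0.13:3000/api/health") is orchestrator's own HTTP health endpoint. Assuming curl is present in the image (an assumption), the same probe can be run by hand:

    NS=kuttl-test-relative-reindeer
    kubectl -n "$NS" exec demand-backup-orc-0 -c orc -- curl -sf http://localhost:3000/api/health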
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 236ms (236ms including waiting). Image size: 425663155 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 240ms (240ms including waiting). Image size: 131819652 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:51 +0000 UTC Normal Pod demand-backup-orc-1 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-orc-1 to gke-jen-ps-893-8b3e0608--default-pool-81577378-8f5w default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:51 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:51 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 258ms (258ms including waiting). Image size: 107824963 bytes. 
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:51 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:51 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:53 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:54 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 220ms (220ms including waiting). Image size: 72305256 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:54 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:54 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Started Started container orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:54 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:54 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 232ms (232ms including waiting). Image size: 72305256 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:54 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:12:54 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:05 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 215ms (215ms including waiting). Image size: 72305256 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:12 +0000 UTC Normal Pod demand-backup-mysql-1 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-mysql-1 to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:17 +0000 UTC Normal Pod demand-backup-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-haproxy-0 to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:18 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:18 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 286ms (286ms including waiting). Image size: 107824963 bytes. 
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:18 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:18 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:20 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:20 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 232ms (232ms including waiting). Image size: 102560607 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:20 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:20 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:20 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:20 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 231ms (231ms including waiting). Image size: 102560607 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:20 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:20 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:21 +0000 UTC Normal Pod demand-backup-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-haproxy-1 to gke-jen-ps-893-8b3e0608--default-pool-81577378-c0gv default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:21 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:22 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 279ms (279ms including waiting). Image size: 107824963 bytes. 
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:22 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:22 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:22 +0000 UTC Normal Pod demand-backup-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-ad357f79-5c59-4fe2-bad8-253e839259b9" attachdetach-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:23 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:23 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:24 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 239ms (239ms including waiting). Image size: 102560607 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:24 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:24 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:24 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:24 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 232ms (232ms including waiting). Image size: 102560607 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:24 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:24 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:24 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 264ms (264ms including waiting). Image size: 107824963 bytes. 
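With the haproxy pods back (each running haproxy plus a mysql-monit sidecar), client traffic re-enters through the haproxy Service rather than hitting the mysql pods directly. A hedged connectivity check, assuming a Service named demand-backup-haproxy exists to match the StatefulSet and that its 3306 port fronts the current primary (the usual operator convention, but not visible in this log); PASS as derived in the first sketch:

    NS=kuttl-test-relative-reindeer
    kubectl -n "$NS" run -it --rm mysql-client --restart=Never \
      --image=perconalab/percona-server-mysql-operator:main-psmysql -- \
      mysql -h demand-backup-haproxy -uroot -p"$PASS" -e "SELECT @@hostname"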
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:24 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:24 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:25 +0000 UTC Normal Pod demand-backup-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-haproxy-2 to gke-jen-ps-893-8b3e0608--default-pool-81577378-8f5w default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:25 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:25 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 273ms (273ms including waiting). Image size: 107824963 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:25 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:25 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:26 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:26 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 218ms (218ms including waiting). Image size: 383656261 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:26 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:26 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:26 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:26 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 221ms (221ms including waiting). Image size: 425663155 bytes. 
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:26 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:26 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:26 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:26 +0000 UTC Normal Pod demand-backup-orc-2 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-orc-2 to gke-jen-ps-893-8b3e0608--default-pool-81577378-c0gv default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:27 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 230ms (230ms including waiting). Image size: 131819652 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:27 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:27 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:27 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:27 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 248ms (248ms including waiting). Image size: 107824963 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:27 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:27 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:28 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:28 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 255ms (255ms including waiting). Image size: 102560607 bytes. 
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:28 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:28 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:28 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:28 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 217ms (217ms including waiting). Image size: 102560607 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:28 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:28 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:30 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:30 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 226ms (226ms including waiting). Image size: 72305256 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:30 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:30 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Started Started container orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:30 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:30 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 191ms (191ms including waiting). Image size: 72305256 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:30 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:30 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:41 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 231ms (231ms including waiting). Image size: 72305256 bytes. 
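At this point all three tiers are redeployed after the first restore. Rather than eyeballing pod events, the cluster-level signal is the PerconaServerMySQL object the ps-controller warned on earlier; a sketch, assuming its status exposes a state field that reaches the value ready (an assumption about the CR schema):

    NS=kuttl-test-relative-reindeer
    kubectl -n "$NS" wait perconaservermysql/demand-backup \
      --for=jsonpath='{.status.state}'=ready --timeout=600s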
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:44 +0000 UTC Warning Pod demand-backup-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/05/12 19:13:43 Waiting for MySQL ready state 2025/05/12 19:13:43 MySQL is ready 2025/05/12 19:13:43 Peers: [6165653466663433.demand-backup-mysql-unready.kuttl-test-relative-reindeer 6637613230656636.demand-backup-mysql-unready.kuttl-test-relative-reindeer] 2025/05/12 19:13:43 FQDN: demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:13:43 Primary: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer Replicas: [demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer] 2025/05/12 19:13:43 lookup demand-backup-mysql-1 [10.93.2.16] 2025/05/12 19:13:43 PodIP: 10.93.2.16 2025/05/12 19:13:43 lookup demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer [10.93.1.9] 2025/05/12 19:13:43 PrimaryIP: 10.93.1.9 2025/05/12 19:13:43 Donor: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:13:43 Opening connection to 10.93.2.16 2025/05/12 19:13:43 Clone required: true 2025/05/12 19:13:43 Checking if a clone in progress 2025/05/12 19:13:43 Clone in progress: false 2025/05/12 19:13:43 Cloning from demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:13:44 Clone finished. Restarting container... kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:44 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:13:48 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 211ms (211ms including waiting). Image size: 383656261 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:14:18 +0000 UTC Normal Pod demand-backup-mysql-2 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-mysql-2 to gke-jen-ps-893-8b3e0608--default-pool-81577378-c0gv default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:14:26 +0000 UTC Normal Pod demand-backup-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-9bba503a-e576-4faa-aca7-61d616b6421a" attachdetach-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:14:27 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:14:28 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 270ms (270ms including waiting). Image size: 107824963 bytes. 
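mysql-1's post-restore bootstrap repeats the clone pattern, and its probe log shows where the peer list comes from: the headless demand-backup-mysql-unready Service, which publishes pods before they are ready so peers can find one another during bootstrap. The lookup can be reproduced from inside a pod, assuming getent is available in the image:

    NS=kuttl-test-relative-reindeer
    kubectl -n "$NS" exec demand-backup-mysql-1 -c mysql -- getent hosts demand-backup-mysql-unready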
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:14:28 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:14:28 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:14:30 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:14:30 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 189ms (189ms including waiting). Image size: 383656261 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:14:30 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:14:30 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:14:30 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:14:30 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 246ms (246ms including waiting). Image size: 425663155 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:14:30 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:14:30 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:14:30 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:14:31 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 233ms (233ms including waiting). Image size: 131819652 bytes. 
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:14:31 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:14:31 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:14:48 +0000 UTC Warning Pod demand-backup-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/05/12 19:14:47 Waiting for MySQL ready state 2025/05/12 19:14:47 MySQL is ready 2025/05/12 19:14:47 Peers: [3538303135326433.demand-backup-mysql-unready.kuttl-test-relative-reindeer 6165653466663433.demand-backup-mysql-unready.kuttl-test-relative-reindeer 6637613230656636.demand-backup-mysql-unready.kuttl-test-relative-reindeer] 2025/05/12 19:14:47 FQDN: demand-backup-mysql-2.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:14:47 Primary: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer Replicas: [demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer demand-backup-mysql-2.demand-backup-mysql.kuttl-test-relative-reindeer] 2025/05/12 19:14:47 lookup demand-backup-mysql-2 [10.93.0.18] 2025/05/12 19:14:47 PodIP: 10.93.0.18 2025/05/12 19:14:47 lookup demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer [10.93.1.9] 2025/05/12 19:14:47 PrimaryIP: 10.93.1.9 2025/05/12 19:14:47 Donor: demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:14:47 Opening connection to 10.93.0.18 2025/05/12 19:14:47 Clone required: true 2025/05/12 19:14:47 Checking if a clone in progress 2025/05/12 19:14:47 Clone in progress: false 2025/05/12 19:14:47 Cloning from demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:14:48 Clone finished. Restarting container... kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:14:48 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:14:52 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 182ms (182ms including waiting). Image size: 383656261 bytes. 
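This is the third "failed startup probe, will be restarted" in the run, and each one directly follows a completed clone, so the restart appears to be the intended hand-off onto the cloned datadir rather than a crash. The live probe settings that govern this window can be read straight off the pod:

    NS=kuttl-test-relative-reindeer
    kubectl -n "$NS" get pod demand-backup-mysql-2 \
      -o jsonpath='{.spec.containers[?(@.name=="mysql")].startupProbe}'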
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:16:10 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:16:10 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:16:10 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:16:10 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:16:10 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:16:10 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:16:10 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:16:11 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:16:11 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:16:12 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:16:12 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:16:14 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:16:14 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:16:14 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:16:19 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:16:19 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:16:19 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:16:22 +0000 UTC Warning Pod demand-backup-mysql-0.spec.containers{mysql} Unhealthy Readiness probe errored: rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 74c04c6134a697f23a677c297d87cf5c8c8f6da5697ebd8076c3c47c11942f05 not found: not found kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:16:27 +0000 UTC Warning Pod demand-backup-mysql-0.spec.containers{mysql} Unhealthy Readiness probe errored: rpc error: code = 
Unknown desc = failed to exec in container: container is in CONTAINER_EXITED state kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:16:41 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:16:41 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:17:12 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:17:12 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:17:46 +0000 UTC Normal Pod xb-restore-demand-backup-restore-minio-backup-source-7nwsx Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/xb-restore-demand-backup-restore-minio-backup-source-7nwsx to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:17:46 +0000 UTC Normal Job.batch xb-restore-demand-backup-restore-minio-backup-source SuccessfulCreate Created pod: xb-restore-demand-backup-restore-minio-backup-source-7nwsx job-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:17:53 +0000 UTC Normal Pod xb-restore-demand-backup-restore-minio-backup-source-7nwsx SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-53e287cd-b8c5-4e71-9d11-b09e92eb6dbb" attachdetach-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:17:54 +0000 UTC Normal Pod xb-restore-demand-backup-restore-minio-backup-source-7nwsx.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:17:55 +0000 UTC Normal Pod xb-restore-demand-backup-restore-minio-backup-source-7nwsx.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 221ms (221ms including waiting). Image size: 107824963 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:17:55 +0000 UTC Normal Pod xb-restore-demand-backup-restore-minio-backup-source-7nwsx.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:17:55 +0000 UTC Normal Pod xb-restore-demand-backup-restore-minio-backup-source-7nwsx.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:17:57 +0000 UTC Normal Pod xb-restore-demand-backup-restore-minio-backup-source-7nwsx.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:17:57 +0000 UTC Normal Pod xb-restore-demand-backup-restore-minio-backup-source-7nwsx.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 239ms (239ms including waiting). Image size: 425663155 bytes. 
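The second restore Job's name, xb-restore-demand-backup-restore-minio-backup-source, suggests this pass restores from an explicitly supplied backup source rather than by referencing the earlier backup object (an inference from the name; the restore spec itself is not in this log). Mechanically it is the same flow: quiesce, run the Job against mysql-0's volume, then redeploy:

    NS=kuttl-test-relative-reindeer
    kubectl -n "$NS" wait job/xb-restore-demand-backup-restore-minio-backup-source \
      --for=condition=complete --timeout=600s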
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:17:57 +0000 UTC Normal Pod xb-restore-demand-backup-restore-minio-backup-source-7nwsx.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:17:57 +0000 UTC Normal Pod xb-restore-demand-backup-restore-minio-backup-source-7nwsx.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:04 +0000 UTC Normal Pod demand-backup-mysql-0 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-mysql-0 to gke-jen-ps-893-8b3e0608--default-pool-81577378-8f5w default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:04 +0000 UTC Warning Pod demand-backup-mysql-0 FailedAttachVolume Multi-Attach error for volume "pvc-53e287cd-b8c5-4e71-9d11-b09e92eb6dbb" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:04 +0000 UTC Normal Pod demand-backup-orc-0 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-orc-0 to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:04 +0000 UTC Normal Job.batch xb-restore-demand-backup-restore-minio-backup-source Completed Job completed job-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:05 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:05 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 252ms (252ms including waiting). Image size: 107824963 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:05 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:05 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:07 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:07 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 224ms (224ms including waiting). Image size: 72305256 bytes. 
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:07 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:07 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Started Started container orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:07 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:07 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 170ms (170ms including waiting). Image size: 72305256 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:07 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:07 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:18 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 274ms (274ms including waiting). Image size: 72305256 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:24 +0000 UTC Normal Pod demand-backup-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-53e287cd-b8c5-4e71-9d11-b09e92eb6dbb" attachdetach-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:25 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:25 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 228ms (228ms including waiting). Image size: 107824963 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:25 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:25 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:26 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:27 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 196ms (196ms including waiting). Image size: 383656261 bytes. 
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:27 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:27 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:27 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:27 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 251ms (251ms including waiting). Image size: 425663155 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:27 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:27 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:27 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:27 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 154ms (154ms including waiting). Image size: 131819652 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:27 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:27 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:40 +0000 UTC Normal Pod demand-backup-orc-1 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-orc-1 to gke-jen-ps-893-8b3e0608--default-pool-81577378-8f5w default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:40 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:40 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 223ms (223ms including waiting). Image size: 107824963 bytes. 
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:41 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:41 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:42 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:43 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 201ms (201ms including waiting). Image size: 72305256 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:43 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:43 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Started Started container orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:43 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:43 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 220ms (220ms including waiting). Image size: 72305256 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:43 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:43 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:18:54 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 198ms (198ms including waiting). Image size: 72305256 bytes. 
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:00 +0000 UTC Normal Pod demand-backup-mysql-1 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-mysql-1 to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:01 +0000 UTC Warning Pod demand-backup-mysql-1 FailedMount MountVolume.SetUp failed for volume "config" : failed to sync secret cache: timed out waiting for the condition kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:08 +0000 UTC Normal Pod demand-backup-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-haproxy-0 to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:08 +0000 UTC Normal Pod demand-backup-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-ad357f79-5c59-4fe2-bad8-253e839259b9" attachdetach-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:09 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:09 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 186ms (186ms including waiting). Image size: 107824963 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:09 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:09 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:11 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:11 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 257ms (257ms including waiting). Image size: 102560607 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:11 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:11 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:11 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:11 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 214ms (214ms including waiting). Image size: 102560607 bytes. 
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:11 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:11 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:12 +0000 UTC Normal Pod demand-backup-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-haproxy-1 to gke-jen-ps-893-8b3e0608--default-pool-81577378-c0gv default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:12 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:12 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:12 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 236ms (236ms including waiting). Image size: 107824963 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:12 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:12 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:13 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 280ms (280ms including waiting). Image size: 107824963 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:13 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:13 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:14 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:14 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 214ms (214ms including waiting). Image size: 383656261 bytes. 
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:14 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:14 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:14 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:14 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 185ms (185ms including waiting). Image size: 425663155 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:14 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:14 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:14 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:15 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:15 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 211ms (211ms including waiting). Image size: 102560607 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:15 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:15 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:15 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:15 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 197ms (197ms including waiting). Image size: 102560607 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:15 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:15 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:15 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 183ms (183ms including waiting). Image size: 131819652 bytes. 
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:15 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:15 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:15 +0000 UTC Normal Pod demand-backup-orc-2 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-orc-2 to gke-jen-ps-893-8b3e0608--default-pool-81577378-c0gv default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:16 +0000 UTC Normal Pod demand-backup-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-haproxy-2 to gke-jen-ps-893-8b3e0608--default-pool-81577378-8f5w default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:16 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:16 +0000 UTC Warning Pod demand-backup-orc-2 FailedMount MountVolume.SetUp failed for volume "custom" : failed to sync configmap cache: timed out waiting for the condition kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:17 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 234ms (234ms including waiting). Image size: 107824963 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:17 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:17 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:17 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:18 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 278ms (278ms including waiting). Image size: 107824963 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:18 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:18 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:19 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:19 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 194ms (194ms including waiting). Image size: 102560607 bytes. 
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:19 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:19 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:19 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:19 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 174ms (174ms including waiting). Image size: 102560607 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:19 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:19 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:20 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:20 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 215ms (215ms including waiting). Image size: 72305256 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:20 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:20 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Started Started container orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:20 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:20 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 219ms (219ms including waiting). Image size: 72305256 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:20 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:20 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:31 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 228ms (228ms including waiting). Image size: 72305256 bytes. 
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:33 +0000 UTC Warning Pod demand-backup-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/05/12 19:19:31 Waiting for MySQL ready state 2025/05/12 19:19:31 MySQL is ready 2025/05/12 19:19:31 Peers: [3965343365653430.demand-backup-mysql-unready.kuttl-test-relative-reindeer 6262303032663465.demand-backup-mysql-unready.kuttl-test-relative-reindeer] 2025/05/12 19:19:31 FQDN: demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:19:31 Primary: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer Replicas: [demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer] 2025/05/12 19:19:31 lookup demand-backup-mysql-1 [10.93.2.20] 2025/05/12 19:19:31 PodIP: 10.93.2.20 2025/05/12 19:19:31 lookup demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer [10.93.1.12] 2025/05/12 19:19:31 PrimaryIP: 10.93.1.12 2025/05/12 19:19:31 Donor: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:19:31 Opening connection to 10.93.2.20 2025/05/12 19:19:31 Clone required: true 2025/05/12 19:19:31 Checking if a clone in progress 2025/05/12 19:19:31 Clone in progress: false 2025/05/12 19:19:31 Cloning from demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:19:33 Clone finished. Restarting container... kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:33 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:19:37 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 175ms (175ms including waiting). Image size: 383656261 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:20:12 +0000 UTC Normal Pod demand-backup-mysql-2 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-mysql-2 to gke-jen-ps-893-8b3e0608--default-pool-81577378-c0gv default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:20:19 +0000 UTC Normal Pod demand-backup-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-9bba503a-e576-4faa-aca7-61d616b6421a" attachdetach-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:20:21 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:20:21 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 210ms (210ms including waiting). Image size: 107824963 bytes. 
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:20:21 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:20:21 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:20:23 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:20:23 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 186ms (186ms including waiting). Image size: 383656261 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:20:23 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:20:23 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:20:23 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:20:23 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 239ms (239ms including waiting). Image size: 425663155 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:20:23 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:20:24 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:20:24 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:20:24 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 197ms (197ms including waiting). Image size: 131819652 bytes. 
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:20:24 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:20:24 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:20:42 +0000 UTC Warning Pod demand-backup-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/05/12 19:20:41 Waiting for MySQL ready state 2025/05/12 19:20:41 MySQL is ready 2025/05/12 19:20:41 Peers: [3764323863363633.demand-backup-mysql-unready.kuttl-test-relative-reindeer 3965343365653430.demand-backup-mysql-unready.kuttl-test-relative-reindeer 6262303032663465.demand-backup-mysql-unready.kuttl-test-relative-reindeer] 2025/05/12 19:20:41 FQDN: demand-backup-mysql-2.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:20:41 Primary: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer Replicas: [demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer demand-backup-mysql-2.demand-backup-mysql.kuttl-test-relative-reindeer] 2025/05/12 19:20:41 lookup demand-backup-mysql-2 [10.93.0.21] 2025/05/12 19:20:41 PodIP: 10.93.0.21 2025/05/12 19:20:41 lookup demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer [10.93.1.12] 2025/05/12 19:20:41 PrimaryIP: 10.93.1.12 2025/05/12 19:20:41 Donor: demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:20:41 Opening connection to 10.93.0.21 2025/05/12 19:20:41 Clone required: true 2025/05/12 19:20:41 Checking if a clone in progress 2025/05/12 19:20:41 Clone in progress: false 2025/05/12 19:20:41 Cloning from demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:20:42 Clone finished. Restarting container... kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:20:42 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:20:46 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 216ms (216ms including waiting). Image size: 383656261 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:29 +0000 UTC Normal Pod xb-demand-backup-s3-aws-s3-sch7l Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/xb-demand-backup-s3-aws-s3-sch7l to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:29 +0000 UTC Normal Pod xb-demand-backup-s3-aws-s3-sch7l.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:29 +0000 UTC Normal Pod xb-demand-backup-s3-aws-s3-sch7l.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 223ms (223ms including waiting). Image size: 107824963 bytes. 
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:29 +0000 UTC Normal Pod xb-demand-backup-s3-aws-s3-sch7l.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:29 +0000 UTC Normal Pod xb-demand-backup-s3-aws-s3-sch7l.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:29 +0000 UTC Normal Job.batch xb-demand-backup-s3-aws-s3 SuccessfulCreate Created pod: xb-demand-backup-s3-aws-s3-sch7l job-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:31 +0000 UTC Normal Pod xb-demand-backup-s3-aws-s3-sch7l.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:31 +0000 UTC Normal Pod xb-demand-backup-s3-aws-s3-sch7l.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 245ms (245ms including waiting). Image size: 425663155 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:32 +0000 UTC Normal Pod xb-demand-backup-s3-aws-s3-sch7l.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:32 +0000 UTC Normal Pod xb-demand-backup-s3-aws-s3-sch7l.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:37 +0000 UTC Normal Job.batch xb-demand-backup-s3-aws-s3 Completed Job completed job-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:50 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:50 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:50 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:50 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:50 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:50 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:50 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:51 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:51 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:52 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:52 +0000 UTC Normal Pod 
demand-backup-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:54 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:54 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:59 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:59 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:21:59 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:22:00 +0000 UTC Warning Pod demand-backup-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/05/12 19:22:00 MySQL state is not ready... kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:22:05 +0000 UTC Warning Pod demand-backup-mysql-0.spec.containers{mysql} Unhealthy Readiness probe errored: rpc error: code = Unknown desc = failed to exec in container: container is in CONTAINER_EXITED state kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:22:20 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:22:20 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:22:51 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:22:51 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:27 +0000 UTC Normal Pod xb-restore-demand-backup-restore-s3-wnvhn Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/xb-restore-demand-backup-restore-s3-wnvhn to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:27 +0000 UTC Normal Job.batch xb-restore-demand-backup-restore-s3 SuccessfulCreate Created pod: xb-restore-demand-backup-restore-s3-wnvhn job-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:34 +0000 UTC Normal Pod xb-restore-demand-backup-restore-s3-wnvhn SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-53e287cd-b8c5-4e71-9d11-b09e92eb6dbb" attachdetach-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:37 +0000 UTC Normal Pod xb-restore-demand-backup-restore-s3-wnvhn.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:38 +0000 UTC Normal Pod xb-restore-demand-backup-restore-s3-wnvhn.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 218ms (218ms including waiting). Image size: 107824963 bytes. 
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:38 +0000 UTC Normal Pod xb-restore-demand-backup-restore-s3-wnvhn.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:38 +0000 UTC Normal Pod xb-restore-demand-backup-restore-s3-wnvhn.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:40 +0000 UTC Normal Pod xb-restore-demand-backup-restore-s3-wnvhn.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:40 +0000 UTC Normal Pod xb-restore-demand-backup-restore-s3-wnvhn.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 228ms (228ms including waiting). Image size: 425663155 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:40 +0000 UTC Normal Pod xb-restore-demand-backup-restore-s3-wnvhn.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:40 +0000 UTC Normal Pod xb-restore-demand-backup-restore-s3-wnvhn.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:52 +0000 UTC Normal Pod demand-backup-mysql-0 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-mysql-0 to gke-jen-ps-893-8b3e0608--default-pool-81577378-8f5w default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:52 +0000 UTC Warning Pod demand-backup-mysql-0 FailedAttachVolume Multi-Attach error for volume "pvc-53e287cd-b8c5-4e71-9d11-b09e92eb6dbb" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:52 +0000 UTC Normal Job.batch xb-restore-demand-backup-restore-s3 Completed Job completed job-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:53 +0000 UTC Normal Pod demand-backup-orc-0 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-orc-0 to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:53 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:54 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 242ms (242ms including waiting). Image size: 107824963 bytes. 
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:54 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:54 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:56 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:56 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 219ms (219ms including waiting). Image size: 72305256 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:56 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:56 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Started Started container orc kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:56 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:56 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 231ms (231ms including waiting). Image size: 72305256 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:56 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:23:56 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:07 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 258ms (258ms including waiting). Image size: 72305256 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:12 +0000 UTC Normal Pod demand-backup-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-53e287cd-b8c5-4e71-9d11-b09e92eb6dbb" attachdetach-controller logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:13 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:14 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 301ms (301ms including waiting). Image size: 107824963 bytes. 
kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:14 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:14 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:15 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:16 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 237ms (237ms including waiting). Image size: 383656261 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:16 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:16 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:16 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:16 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 225ms (225ms including waiting). Image size: 425663155 bytes. kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:16 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:16 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:16 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:16 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 230ms (230ms including waiting). Image size: 131819652 bytes. 
kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:16 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:16 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:28 +0000 UTC Normal Pod demand-backup-orc-1 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-orc-1 to gke-jen-ps-893-8b3e0608--default-pool-81577378-8f5w default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:29 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:29 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 233ms (233ms including waiting). Image size: 107824963 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:29 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Created Created container: orc-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:29 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Started Started container orc-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:31 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:31 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 246ms (246ms including waiting). Image size: 72305256 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:31 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Created Created container: orc kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:31 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Started Started container orc kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:31 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:31 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 234ms (234ms including waiting). Image size: 72305256 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:31 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:31 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:42 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 248ms (248ms including waiting). Image size: 72305256 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:49 +0000 UTC Normal Pod demand-backup-mysql-1 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-mysql-1 to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:52 +0000 UTC Normal Pod demand-backup-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-haproxy-0 to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:53 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:53 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 241ms (241ms including waiting). Image size: 107824963 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:53 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:53 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:55 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:55 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 222ms (222ms including waiting). Image size: 102560607 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:55 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:55 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:55 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:55 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 213ms (213ms including waiting). Image size: 102560607 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:55 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:56 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:56 +0000 UTC Normal Pod demand-backup-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-haproxy-1 to gke-jen-ps-893-8b3e0608--default-pool-81577378-c0gv default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:56 +0000 UTC Normal Pod demand-backup-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-ad357f79-5c59-4fe2-bad8-253e839259b9" attachdetach-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:57 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:57 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 257ms (257ms including waiting). Image size: 107824963 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:57 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:57 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:59 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:59 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 245ms (245ms including waiting). Image size: 102560607 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:59 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:59 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:59 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:59 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 225ms (225ms including waiting). Image size: 102560607 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:59 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:59 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:59 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:59 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 238ms (238ms including waiting). Image size: 107824963 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:59 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:24:59 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:00 +0000 UTC Normal Pod demand-backup-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-haproxy-2 to gke-jen-ps-893-8b3e0608--default-pool-81577378-8f5w default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:00 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:01 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 280ms (280ms including waiting). Image size: 107824963 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:01 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:01 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:01 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:01 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 225ms (225ms including waiting). Image size: 383656261 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:01 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:01 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:01 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:02 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 243ms (243ms including waiting). Image size: 425663155 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:02 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:02 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:02 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:02 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 225ms (225ms including waiting). Image size: 131819652 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:02 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:02 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:03 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:03 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 238ms (238ms including waiting). Image size: 102560607 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:03 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:03 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:03 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:03 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 226ms (226ms including waiting). Image size: 102560607 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:04 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:04 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:04 +0000 UTC Normal Pod demand-backup-orc-2 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-orc-2 to gke-jen-ps-893-8b3e0608--default-pool-81577378-c0gv default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:05 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:05 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 288ms (288ms including waiting). Image size: 107824963 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:05 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orc-init} Created Created container: orc-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:05 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orc-init} Started Started container orc-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:07 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:07 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 220ms (220ms including waiting). Image size: 72305256 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:07 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Created Created container: orc kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:07 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Started Started container orc kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:07 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:07 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 282ms (282ms including waiting). Image size: 72305256 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:07 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:08 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:18 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 238ms (238ms including waiting). Image size: 72305256 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:20 +0000 UTC Warning Pod demand-backup-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/05/12 19:25:19 Waiting for MySQL ready state 2025/05/12 19:25:19 MySQL is ready 2025/05/12 19:25:19 Peers: [3833303061643535.demand-backup-mysql-unready.kuttl-test-relative-reindeer 6331333130373039.demand-backup-mysql-unready.kuttl-test-relative-reindeer] 2025/05/12 19:25:19 FQDN: demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:25:19 Primary: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer Replicas: [demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer] 2025/05/12 19:25:19 lookup demand-backup-mysql-1 [10.93.2.25] 2025/05/12 19:25:19 PodIP: 10.93.2.25 2025/05/12 19:25:19 lookup demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer [10.93.1.15] 2025/05/12 19:25:19 PrimaryIP: 10.93.1.15 2025/05/12 19:25:19 Donor: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:25:19 Opening connection to 10.93.2.25 2025/05/12 19:25:19 Clone required: true 2025/05/12 19:25:19 Checking if a clone in progress 2025/05/12 19:25:19 Clone in progress: false 2025/05/12 19:25:19 Cloning from demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:25:20 Clone finished. Restarting container... kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:20 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:24 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 258ms (258ms including waiting). Image size: 383656261 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:25:54 +0000 UTC Normal Pod demand-backup-mysql-2 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-mysql-2 to gke-jen-ps-893-8b3e0608--default-pool-81577378-c0gv default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:26:02 +0000 UTC Normal Pod demand-backup-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-9bba503a-e576-4faa-aca7-61d616b6421a" attachdetach-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:26:03 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:26:03 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 236ms (236ms including waiting). Image size: 107824963 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:26:04 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:26:04 +0000 UTC Normal Pod demand-backup-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:26:05 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:26:05 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 187ms (187ms including waiting). Image size: 383656261 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:26:05 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:26:05 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:26:05 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:26:06 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 244ms (244ms including waiting). Image size: 425663155 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:26:06 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:26:06 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:26:06 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:26:06 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 189ms (189ms including waiting). Image size: 131819652 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:26:06 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:26:06 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:26:24 +0000 UTC Warning Pod demand-backup-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/05/12 19:26:23 Waiting for MySQL ready state 2025/05/12 19:26:23 MySQL is ready 2025/05/12 19:26:23 Peers: [3164343630313636.demand-backup-mysql-unready.kuttl-test-relative-reindeer 3833303061643535.demand-backup-mysql-unready.kuttl-test-relative-reindeer 6331333130373039.demand-backup-mysql-unready.kuttl-test-relative-reindeer] 2025/05/12 19:26:23 FQDN: demand-backup-mysql-2.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:26:23 Primary: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer Replicas: [demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer demand-backup-mysql-2.demand-backup-mysql.kuttl-test-relative-reindeer] 2025/05/12 19:26:23 lookup demand-backup-mysql-2 [10.93.0.24] 2025/05/12 19:26:23 PodIP: 10.93.0.24 2025/05/12 19:26:23 lookup demand-backup-mysql-0.demand-backup-mysql.kuttl-test-relative-reindeer [10.93.1.15] 2025/05/12 19:26:23 PrimaryIP: 10.93.1.15 2025/05/12 19:26:23 Donor: demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:26:23 Opening connection to 10.93.0.24 2025/05/12 19:26:23 Clone required: true 2025/05/12 19:26:23 Checking if a clone in progress 2025/05/12 19:26:23 Clone in progress: false 2025/05/12 19:26:23 Cloning from demand-backup-mysql-1.demand-backup-mysql.kuttl-test-relative-reindeer 2025/05/12 19:26:24 Clone finished. Restarting container... kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:26:24 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:26:27 +0000 UTC Normal Pod demand-backup-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 179ms (179ms including waiting). Image size: 383656261 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:08 +0000 UTC Normal Pod xb-demand-backup-gcp-gcp-cs-slh9n Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/xb-demand-backup-gcp-gcp-cs-slh9n to gke-jen-ps-893-8b3e0608--default-pool-81577378-887w default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:08 +0000 UTC Normal Pod xb-demand-backup-gcp-gcp-cs-slh9n.spec.initContainers{xtrabackup-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:08 +0000 UTC Normal Job.batch xb-demand-backup-gcp-gcp-cs SuccessfulCreate Created pod: xb-demand-backup-gcp-gcp-cs-slh9n job-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:09 +0000 UTC Normal Pod xb-demand-backup-gcp-gcp-cs-slh9n.spec.initContainers{xtrabackup-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 208ms (208ms including waiting). Image size: 107824963 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:09 +0000 UTC Normal Pod xb-demand-backup-gcp-gcp-cs-slh9n.spec.initContainers{xtrabackup-init} Created Created container: xtrabackup-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:09 +0000 UTC Normal Pod xb-demand-backup-gcp-gcp-cs-slh9n.spec.initContainers{xtrabackup-init} Started Started container xtrabackup-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:10 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:10 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Killing Stopping container orc kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:10 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:10 +0000 UTC Normal Pod xb-demand-backup-gcp-gcp-cs-slh9n.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:11 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:11 +0000 UTC Warning Pod demand-backup-haproxy-2 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:11 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:11 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:11 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:11 +0000 UTC Normal Pod xb-demand-backup-gcp-gcp-cs-slh9n.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 270ms (270ms including waiting). Image size: 425663155 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:11 +0000 UTC Normal Pod xb-demand-backup-gcp-gcp-cs-slh9n.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:11 +0000 UTC Normal Pod xb-demand-backup-gcp-gcp-cs-slh9n.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:12 +0000 UTC Normal Pod demand-backup-haproxy-2 NotTriggerScaleUp pod didn't trigger scale-up: cluster-autoscaler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:13 +0000 UTC Warning Pod demand-backup-haproxy-2 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:17 +0000 UTC Normal Job.batch xb-demand-backup-gcp-gcp-cs Completed Job completed job-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:27 +0000 UTC Normal Pod demand-backup-mysql-0 TaintManagerEviction Cancelling deletion of Pod kuttl-test-relative-reindeer/demand-backup-mysql-0 taint-eviction-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:27 +0000 UTC Normal Pod demand-backup-orc-1 TaintManagerEviction Cancelling deletion of Pod kuttl-test-relative-reindeer/demand-backup-orc-1 taint-eviction-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:27 +0000 UTC Warning Pod demand-backup-orc-1 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:28 +0000 UTC Warning Pod demand-backup-mysql-0 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:28 +0000 UTC Normal Pod demand-backup-mysql-0 NotTriggerScaleUp pod didn't trigger scale-up: cluster-autoscaler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:28 +0000 UTC Normal Pod demand-backup-orc-1 NotTriggerScaleUp pod didn't trigger scale-up: cluster-autoscaler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:29 +0000 UTC Warning Pod demand-backup-mysql-0 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:27:29 +0000 UTC Warning Pod demand-backup-orc-1 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:28:48 +0000 UTC Warning Pod demand-backup-mysql-0 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had volume node affinity conflict, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:28:48 +0000 UTC Normal Pod demand-backup-orc-1 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-orc-1 to gke-jen-ps-893-8b3e0608--default-pool-81577378-8f5w default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:28:49 +0000 UTC Normal Pod demand-backup-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-haproxy-2 to gke-jen-ps-893-8b3e0608--default-pool-81577378-8f5w default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:28:50 +0000 UTC Warning Pod demand-backup-haproxy-2 FailedMount MountVolume.SetUp failed for volume "users" : failed to sync secret cache: timed out waiting for the condition kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:28:50 +0000 UTC Warning Pod demand-backup-haproxy-2 FailedMount MountVolume.SetUp failed for volume "config" : failed to sync configmap cache: timed out waiting for the condition kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:28:50 +0000 UTC Warning Pod demand-backup-haproxy-2 FailedMount MountVolume.SetUp failed for volume "tls" : failed to sync secret cache: timed out waiting for the condition kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:28:50 +0000 UTC Warning Pod demand-backup-orc-1 FailedMount MountVolume.SetUp failed for volume "tls" : failed to sync secret cache: timed out waiting for the condition kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:28:50 +0000 UTC Warning Pod demand-backup-orc-1 FailedMount MountVolume.SetUp failed for volume "users" : failed to sync secret cache: timed out waiting for the condition kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:28:51 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:28:51 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:28:57 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 5.774s (5.774s including waiting). Image size: 107824963 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:28:57 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:28:57 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:28:57 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 5.703s (5.754s including waiting). Image size: 107824963 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:28:57 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Created Created container: orc-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:28:57 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Started Started container orc-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:28:59 +0000 UTC Normal Pod demand-backup-mysql-0 Binding Scheduled Successfully assigned kuttl-test-relative-reindeer/demand-backup-mysql-0 to gke-jen-ps-893-8b3e0608--default-pool-81577378-8f5w default-scheduler
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:04 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:04 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:07 +0000 UTC Normal Pod demand-backup-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-53e287cd-b8c5-4e71-9d11-b09e92eb6dbb" attachdetach-controller
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:07 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 2.424s (2.424s including waiting). Image size: 72305256 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:07 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Created Created container: orc kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:07 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Started Started container orc kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:07 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:07 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 192ms (192ms including waiting). Image size: 72305256 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:07 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:07 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:08 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 4.222s (4.222s including waiting). Image size: 102560607 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:08 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:09 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:09 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:09 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 211ms (211ms including waiting). Image size: 102560607 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:09 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:09 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:11 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:11 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-893-8b3e0608" in 255ms (255ms including waiting). Image size: 107824963 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:11 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:11 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:15 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:20 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 237ms (237ms including waiting). Image size: 72305256 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:38 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 23.246s (23.246s including waiting). Image size: 383656261 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:38 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:29:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:30:10 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 31.74s (31.74s including waiting). Image size: 425663155 bytes. kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:30:10 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:30:10 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 19:30:14 | demand-backup | 2025-05-12 19:30:10 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 19:30:15 | demand-backup | Deleting namespace: kuttl-test-relative-reindeer
case.go:116: context deadline exceeded
=== NAME kuttl
harness.go:407: run tests finished
harness.go:515: cleaning up
harness.go:572: removing temp folder: ""
--- FAIL: kuttl (1973.60s)
--- FAIL: kuttl/harness (0.00s)
--- FAIL: kuttl/harness/demand-backup (1973.16s)
FAIL