=== RUN   kuttl
harness.go:464: starting setup
harness.go:255: running tests using configured kubeconfig.
harness.go:278: Successful connection to cluster at: https://34.29.10.236
harness.go:363: running tests
harness.go:75: going to run test suite with timeout of 180 seconds for each step
harness.go:375: testsuite: e2e-tests/tests has 34 tests
=== RUN   kuttl/harness
=== RUN   kuttl/harness/demand-backup
=== PAUSE kuttl/harness/demand-backup
=== CONT  kuttl/harness/demand-backup
logger.go:42: 21:42:02 | demand-backup | Creating namespace: kuttl-test-apparent-pheasant
logger.go:42: 21:42:02 | demand-backup/0-minio-secret | starting test step 0-minio-secret
logger.go:42: 21:42:03 | demand-backup/0-minio-secret | Secret:kuttl-test-apparent-pheasant/minio-secret created
logger.go:42: 21:42:03 | demand-backup/0-minio-secret | test step completed 0-minio-secret
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | starting test step 1-deploy-operator
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
init_temp_dir # do this only in the first TestStep
apply_s3_storage_secrets
deploy_operator
deploy_non_tls_cluster_secrets
deploy_tls_cluster_secrets
deploy_client
deploy_minio]
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | + source ../../functions
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ realpath ../../..
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-676
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | ++++ pwd
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/tests/demand-backup
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | ++ test_name=demand-backup
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/vars.sh
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-676
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-676
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/deploy
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/deploy
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/conf
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/conf
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ export GIT_BRANCH=PR-676
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ GIT_BRANCH=PR-676
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ export VERSION=PR-676-5e3c84d9
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ VERSION=PR-676-5e3c84d9
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-676-5e3c84d9
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-676-5e3c84d9
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ export IMAGE_MYSQL=percona/percona-server:8.4
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ IMAGE_MYSQL=percona/percona-server:8.4
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ export IMAGE_BACKUP=percona/percona-xtrabackup:8.4
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ IMAGE_BACKUP=percona/percona-xtrabackup:8.4
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ export IMAGE_ROUTER=percona/percona-mysql-router:8.4
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ IMAGE_ROUTER=percona/percona-mysql-router:8.4
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ export CERT_MANAGER_VER=1.15.1
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ CERT_MANAGER_VER=1.15.1
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | ++++ which gdate
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-676/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | ++++ which date
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ date=/usr/bin/date
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ command -v oc
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ kubectl get nodes
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | +++ grep '^minikube'
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | + init_temp_dir
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | + rm -rf /tmp/kuttl/ps/demand-backup
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | + mkdir -p /tmp/kuttl/ps/demand-backup
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | + apply_s3_storage_secrets
logger.go:42: 21:42:03 | demand-backup/1-deploy-operator | + kubectl -n kuttl-test-apparent-pheasant apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/conf/minio-secret.yml
logger.go:42: 21:42:04 | demand-backup/1-deploy-operator | Warning: resource secrets/minio-secret is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
logger.go:42: 21:42:04 | demand-backup/1-deploy-operator | secret/minio-secret configured
logger.go:42: 21:42:04 | demand-backup/1-deploy-operator | + kubectl -n kuttl-test-apparent-pheasant apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/conf/cloud-secret.yml
logger.go:42: 21:42:05 | demand-backup/1-deploy-operator | secret/aws-s3-secret created
logger.go:42: 21:42:06 | demand-backup/1-deploy-operator | secret/gcp-cs-secret created
logger.go:42: 21:42:06 | demand-backup/1-deploy-operator | secret/azure-secret created
logger.go:42: 21:42:06 | demand-backup/1-deploy-operator | + deploy_operator
logger.go:42: 21:42:06 | demand-backup/1-deploy-operator | + destroy_operator
logger.go:42: 21:42:06 | demand-backup/1-deploy-operator | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
logger.go:42: 21:42:06 | demand-backup/1-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 21:42:06 | demand-backup/1-deploy-operator | deployment.apps "percona-server-mysql-operator" force deleted
logger.go:42: 21:42:06 | demand-backup/1-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 21:42:06 | demand-backup/1-deploy-operator | + kubectl delete namespace ps-operator --force --grace-period=0
logger.go:42: 21:42:07 | demand-backup/1-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 21:42:07 | demand-backup/1-deploy-operator | namespace "ps-operator" force deleted
logger.go:42: 21:42:12 | demand-backup/1-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 21:42:12 | demand-backup/1-deploy-operator | + create_namespace ps-operator
logger.go:42: 21:42:12 | demand-backup/1-deploy-operator | + local namespace=ps-operator
logger.go:42: 21:42:12 | demand-backup/1-deploy-operator | + [[ -n '' ]]
logger.go:42: 21:42:12 | demand-backup/1-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found
logger.go:42: 21:42:13 | demand-backup/1-deploy-operator | + kubectl wait --for=delete namespace ps-operator
logger.go:42: 21:42:13 | demand-backup/1-deploy-operator | + kubectl create namespace ps-operator
logger.go:42: 21:42:13 | demand-backup/1-deploy-operator | namespace/ps-operator created
logger.go:42: 21:42:13 | demand-backup/1-deploy-operator | + kubectl -n ps-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-676/deploy/crd.yaml
logger.go:42: 21:42:14 | demand-backup/1-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
logger.go:42: 21:42:14 | demand-backup/1-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
logger.go:42: 21:42:15 | demand-backup/1-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
logger.go:42: 21:42:15 | demand-backup/1-deploy-operator | + '[' -n ps-operator ']'
logger.go:42: 21:42:15 | demand-backup/1-deploy-operator | + kubectl -n ps-operator apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-676/deploy/cw-rbac.yaml
logger.go:42: 21:42:16 | demand-backup/1-deploy-operator | serviceaccount/percona-server-mysql-operator created
logger.go:42: 21:42:16 | demand-backup/1-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 21:42:17 | demand-backup/1-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 21:42:17 | demand-backup/1-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 21:42:17 | demand-backup/1-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 21:42:17 | demand-backup/1-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"'
logger.go:42: 21:42:17 | demand-backup/1-deploy-operator | + kubectl -n ps-operator apply -f -
logger.go:42: 21:42:17 | demand-backup/1-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"'
logger.go:42: 21:42:17 | demand-backup/1-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-676-5e3c84d9
logger.go:42: 21:42:17 | demand-backup/1-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-676-5e3c84d9"' /mnt/jenkins/workspace/cloud-ps-operator_PR-676/deploy/cw-operator.yaml
logger.go:42: 21:42:18 | demand-backup/1-deploy-operator | configmap/percona-server-mysql-operator-config created
logger.go:42: 21:42:18 | demand-backup/1-deploy-operator | deployment.apps/percona-server-mysql-operator created
logger.go:42: 21:42:18 | demand-backup/1-deploy-operator | + deploy_non_tls_cluster_secrets
logger.go:42: 21:42:18 | demand-backup/1-deploy-operator | + kubectl -n kuttl-test-apparent-pheasant apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/conf/secrets.yaml
logger.go:42: 21:42:19 | demand-backup/1-deploy-operator | secret/test-secrets created
logger.go:42: 21:42:19 | demand-backup/1-deploy-operator | + deploy_tls_cluster_secrets
logger.go:42: 21:42:19 | demand-backup/1-deploy-operator | + kubectl -n kuttl-test-apparent-pheasant apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/conf/ssl-secret.yaml
logger.go:42: 21:42:20 | demand-backup/1-deploy-operator | secret/test-ssl created
logger.go:42: 21:42:20 | demand-backup/1-deploy-operator | + deploy_client
logger.go:42: 21:42:20 | demand-backup/1-deploy-operator | + kubectl -n kuttl-test-apparent-pheasant apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/conf/client.yaml
logger.go:42: 21:42:21 | demand-backup/1-deploy-operator | pod/mysql-client created
logger.go:42: 21:42:21 | demand-backup/1-deploy-operator | + deploy_minio
logger.go:42: 21:42:21 | demand-backup/1-deploy-operator | + local access_key
logger.go:42: 21:42:21 | demand-backup/1-deploy-operator | + local secret_key
logger.go:42: 21:42:21 | demand-backup/1-deploy-operator | ++ kubectl -n kuttl-test-apparent-pheasant get secret minio-secret -o 'jsonpath={.data.AWS_ACCESS_KEY_ID}'
logger.go:42: 21:42:21 | demand-backup/1-deploy-operator | ++ base64 -d
logger.go:42: 21:42:21 | demand-backup/1-deploy-operator | + access_key=some-access-key
logger.go:42: 21:42:21 | demand-backup/1-deploy-operator | ++ kubectl -n kuttl-test-apparent-pheasant get secret minio-secret -o 'jsonpath={.data.AWS_SECRET_ACCESS_KEY}'
logger.go:42: 21:42:21 | demand-backup/1-deploy-operator | ++ base64 -d
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | + secret_key=some-secret-key
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | + helm uninstall -n kuttl-test-apparent-pheasant minio-service
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-676/kubeconfig
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-676/kubeconfig
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | Error: uninstall: Release not loaded: minio-service: release: not found
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | + :
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | + helm repo remove minio
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-676/kubeconfig
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-676/kubeconfig
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | "minio" has been removed from your repositories
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | + helm repo add minio https://charts.min.io/
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-676/kubeconfig
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-676/kubeconfig
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | "minio" has been added to your repositories
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | +++ printf %q some-access-key
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | ++ printf %q some-access-key
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | +++ printf %q some-secret-key
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | ++ printf %q some-secret-key
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | + retry 10 60 helm install minio-service -n kuttl-test-apparent-pheasant --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | + local max=10
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | + local delay=60
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | + shift 2
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | + local n=1
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | + helm install minio-service -n kuttl-test-apparent-pheasant --version 5.0.14 --set replicas=1 --set mode=standalone --set resources.requests.memory=256Mi --set rootUser=rootuser --set rootPassword=rootpass123 --set 'users[0].accessKey=some-access-key' --set 'users[0].secretKey=some-secret-key' --set 'users[0].policy=consoleAdmin' --set service.type=ClusterIP --set configPathmc=/tmp/.minio/ --set persistence.size=2G --set securityContext.enabled=false minio/minio
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-676/kubeconfig
logger.go:42: 21:42:22 | demand-backup/1-deploy-operator | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-676/kubeconfig
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | NAME: minio-service
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | LAST DEPLOYED: Wed Jan 15 21:42:23 2025
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | NAMESPACE: kuttl-test-apparent-pheasant
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | STATUS: deployed
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | REVISION: 1
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | TEST SUITE: None
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | NOTES:
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | MinIO can be accessed via port 9000 on the following DNS name from within your cluster:
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | minio-service.kuttl-test-apparent-pheasant.svc.cluster.local
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator |
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | To access MinIO from localhost, run the below commands:
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator |
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | 1. export POD_NAME=$(kubectl get pods --namespace kuttl-test-apparent-pheasant -l "release=minio-service" -o jsonpath="{.items[0].metadata.name}")
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator |
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | 2. kubectl port-forward $POD_NAME 9000 --namespace kuttl-test-apparent-pheasant
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator |
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubectl/kubectl_port-forward/
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator |
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client:
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator |
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator |
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | 2. export MC_HOST_minio-service-local=http://$(kubectl get secret --namespace kuttl-test-apparent-pheasant minio-service -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace kuttl-test-apparent-pheasant minio-service -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:9000
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator |
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | 3. mc ls minio-service-local
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | ++ kubectl -n kuttl-test-apparent-pheasant get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | + MINIO_POD=minio-service-847fc8bb8d-5k92w
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | + wait_pod minio-service-847fc8bb8d-5k92w
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | + local pod=minio-service-847fc8bb8d-5k92w
logger.go:42: 21:43:12 | demand-backup/1-deploy-operator | + set +o xtrace
logger.go:42: 21:43:13 | demand-backup/1-deploy-operator | minio-service-847fc8bb8d-5k92wtrue
logger.go:42: 21:43:13 | demand-backup/1-deploy-operator | + kubectl -n kuttl-test-apparent-pheasant run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- bash -c 'AWS_ACCESS_KEY_ID='\''some-access-key'\'' AWS_SECRET_ACCESS_KEY='\''some-secret-key'\'' AWS_DEFAULT_REGION=us-east-1 /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing'
logger.go:42: 21:43:17 | demand-backup/1-deploy-operator | If you don't see a command prompt, try pressing enter.
logger.go:42: 21:43:18 | demand-backup/1-deploy-operator | warning: couldn't attach to pod/aws-cli, falling back to streaming logs: unable to upgrade connection: container aws-cli not found in pod aws-cli_kuttl-test-apparent-pheasant
logger.go:42: 21:43:18 | demand-backup/1-deploy-operator | make_bucket: operator-testing
logger.go:42: 21:43:21 | demand-backup/1-deploy-operator | pod "aws-cli" deleted
[controller-runtime] log.SetLogger(...) was never called; logs will not be displayed. Detected at:
> goroutine 36 [running]:
> runtime/debug.Stack()
> /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/runtime/debug/stack.go:24 +0x5e
> sigs.k8s.io/controller-runtime/pkg/log.eventuallyFulfillRoot()
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/log.go:60 +0xcd
> sigs.k8s.io/controller-runtime/pkg/log.(*delegatingLogSink).WithName(0xc00032dc00, {0x184a055, 0x14})
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/deleg.go:147 +0x3e
> github.com/go-logr/logr.Logger.WithName({{0x1acb7d8, 0xc00032dc00}, 0x0}, {0x184a055?, 0xc000787f80?})
> /home/mowsiany/go/pkg/mod/github.com/go-logr/logr@v1.2.4/logr.go:336 +0x36
> sigs.k8s.io/controller-runtime/pkg/client.newClient(0x131ead3?, {0x0, 0xc0004b89a0, {0x1accd90, 0xc0005b2040}, 0x0, {0x0, 0x0}, 0x0})
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:122 +0xf1
> sigs.k8s.io/controller-runtime/pkg/client.New(0xc000129b08?, {0x0, 0xc0004b89a0, {0x1accd90, 0xc0005b2040}, 0x0, {0x0, 0x0}, 0x0})
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:103 +0x7d
> github.com/kudobuilder/kuttl/pkg/test/utils.NewRetryClient(0xc000129b08, {0x0, 0xc0004b89a0, {0x1accd90, 0xc0005b2040}, 0x0, {0x0, 0x0}, 0x0})
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/utils/kubernetes.go:177 +0x127
> github.com/kudobuilder/kuttl/pkg/test.(*Harness).Client(0xc0003c9208, 0xe6?)
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:323 +0x18e
> github.com/kudobuilder/kuttl/pkg/test.(*Step).Create(0xc0006c4000, 0xc0003c1520, {0xc0006d3300, 0x1c})
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:177 +0x63
> github.com/kudobuilder/kuttl/pkg/test.(*Step).Run(0xc0006c4000, 0xc0003c1520, {0xc0006d3300, 0x1c})
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:457 +0x24a
> github.com/kudobuilder/kuttl/pkg/test.(*Case).Run(0xc0000a8a00, 0xc0003c1520, 0xc00048cbd0)
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/case.go:373 +0xaeb
> github.com/kudobuilder/kuttl/pkg/test.(*Harness).RunTests.func1.1(0xc0003c1520)
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:401 +0x12e
> testing.tRunner(0xc0003c1520, 0xc00019e5e8)
> /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1689 +0xfb
> created by testing.(*T).Run in goroutine 35
> /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1742 +0x390
logger.go:42: 21:43:21 | demand-backup/1-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 21:43:21 | demand-backup/1-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 21:43:21 | demand-backup/1-deploy-operator | INFO Found 1 resource(s).
logger.go:42: 21:43:21 | demand-backup/1-deploy-operator | NAME                           NAMESPACE    COL0
logger.go:42: 21:43:21 | demand-backup/1-deploy-operator | percona-server-mysql-operator  ps-operator  1
logger.go:42: 21:43:21 | demand-backup/1-deploy-operator | ASSERT PASS
logger.go:42: 21:43:21 | demand-backup/1-deploy-operator | test step completed 1-deploy-operator
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | starting test step 2-create-cluster
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
get_cr \
  | yq eval ".spec.mysql.clusterType=\"async\"" - \
  | yq eval ".spec.mysql.size=3" - \
  | yq eval ".spec.proxy.haproxy.enabled=true" - \
  | yq eval ".spec.proxy.haproxy.size=3" - \
  | yq eval ".spec.orchestrator.enabled=true" - \
  | yq eval ".spec.orchestrator.size=3" - \
  | yq eval ".spec.backup.storages.minio.type=\"s3\"" - \
  | yq eval ".spec.backup.storages.minio.s3.bucket=\"operator-testing\"" - \
  | yq eval ".spec.backup.storages.minio.s3.credentialsSecret=\"minio-secret\"" - \
  | yq eval ".spec.backup.storages.minio.s3.endpointUrl=\"http://minio-service.${NAMESPACE}:9000\"" - \
  | yq eval ".spec.backup.storages.minio.s3.region=\"us-east-1\"" - \
  | yq eval ".spec.backup.storages.aws-s3.type=\"s3\"" - \
  | yq eval ".spec.backup.storages.aws-s3.verifyTLS=true" - \
  | yq eval ".spec.backup.storages.aws-s3.s3.bucket=\"operator-testing\"" - \
  | yq eval ".spec.backup.storages.aws-s3.s3.credentialsSecret=\"aws-s3-secret\"" - \
  | yq eval ".spec.backup.storages.aws-s3.s3.region=\"us-east-1\"" - \
  | yq eval ".spec.backup.storages.aws-s3.s3.prefix=\"ps\"" - \
  | yq eval ".spec.backup.storages.gcp-cs.type=\"gcs\"" - \
  | yq eval ".spec.backup.storages.gcp-cs.verifyTLS=true" - \
  | yq eval ".spec.backup.storages.gcp-cs.gcs.bucket=\"operator-testing\"" - \
  | yq eval ".spec.backup.storages.gcp-cs.gcs.credentialsSecret=\"gcp-cs-secret\"" - \
  | yq eval ".spec.backup.storages.gcp-cs.gcs.endpointUrl=\"https://storage.googleapis.com\"" - \
  | yq eval ".spec.backup.storages.gcp-cs.gcs.prefix=\"ps\"" - \
  | yq eval ".spec.backup.storages.azure-blob.type=\"azure\"" - \
  | yq eval ".spec.backup.storages.azure-blob.verifyTLS=true" - \
  | yq eval ".spec.backup.storages.azure-blob.azure.containerName=\"operator-testing\"" - \
  | yq eval ".spec.backup.storages.azure-blob.azure.credentialsSecret=\"azure-secret\"" - \
  | yq eval ".spec.backup.storages.azure-blob.azure.prefix=\"ps\"" - \
  | kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | + source ../../functions
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ realpath ../../..
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-676
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | ++++ pwd
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/tests/demand-backup
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | ++ test_name=demand-backup
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/vars.sh
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-676
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-676
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/deploy
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/deploy
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/conf
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/conf
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/demand-backup
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/demand-backup
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ export GIT_BRANCH=PR-676
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ GIT_BRANCH=PR-676
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ export VERSION=PR-676-5e3c84d9
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ VERSION=PR-676-5e3c84d9
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-676-5e3c84d9
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-676-5e3c84d9
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ export IMAGE_MYSQL=percona/percona-server:8.4
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ IMAGE_MYSQL=percona/percona-server:8.4
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ export IMAGE_BACKUP=percona/percona-xtrabackup:8.4
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ IMAGE_BACKUP=percona/percona-xtrabackup:8.4
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ export IMAGE_ROUTER=percona/percona-mysql-router:8.4
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ IMAGE_ROUTER=percona/percona-mysql-router:8.4
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ export CERT_MANAGER_VER=1.15.1
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ CERT_MANAGER_VER=1.15.1
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | ++++ which gdate
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-676/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | ++++ which date
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ date=/usr/bin/date
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ command -v oc
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ kubectl get nodes
logger.go:42: 21:43:21 | demand-backup/2-create-cluster | +++ grep '^minikube'
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval .spec.mysql.size=3 -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval .spec.proxy.haproxy.size=3 -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval .spec.proxy.haproxy.enabled=true -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + get_cr
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + local name_suffix=
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.gcp-cs.gcs.bucket="operator-testing"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + '[' -n '' ']'
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval .spec.orchestrator.enabled=true -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval .spec.orchestrator.size=3 -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.minio.type="s3"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.aws-s3.type="s3"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.minio.s3.bucket="operator-testing"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.aws-s3.s3.bucket="operator-testing"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.minio.s3.credentialsSecret="minio-secret"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.aws-s3.s3.credentialsSecret="aws-s3-secret"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.minio.s3.endpointUrl="http://minio-service.kuttl-test-apparent-pheasant:9000"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.minio.s3.region="us-east-1"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.secretsName="test-secrets"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.aws-s3.s3.prefix="ps"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.gcp-cs.type="gcs"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval .spec.backup.storages.aws-s3.verifyTLS=true -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.aws-s3.s3.region="us-east-1"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval .spec.backup.storages.gcp-cs.verifyTLS=true -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.gcp-cs.gcs.credentialsSecret="gcp-cs-secret"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.gcp-cs.gcs.prefix="ps"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.azure-blob.type="azure"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | ++ printf '.spec.proxy.router.image="%s"' percona/percona-mysql-router:8.4
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.proxy.router.image="percona/percona-mysql-router:8.4"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval .spec.backup.storages.azure-blob.verifyTLS=true -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + kubectl -n kuttl-test-apparent-pheasant apply -f -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-676-5e3c84d9
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-676-5e3c84d9"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | ++ printf '.spec.mysql.image="%s"' percona/percona-server:8.4
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.mysql.image="percona/percona-server:8.4"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | ++ printf '.spec.backup.image="%s"' percona/percona-xtrabackup:8.4
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.backup.image="percona/percona-xtrabackup:8.4"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.gcp-cs.gcs.endpointUrl="https://storage.googleapis.com"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.azure-blob.azure.containerName="operator-testing"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.azure-blob.azure.credentialsSecret="azure-secret"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.backup.storages.azure-blob.azure.prefix="ps"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | ++ printf '.metadata.name="%s"' demand-backup
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.metadata.name="demand-backup"' /mnt/jenkins/workspace/cloud-ps-operator_PR-676/deploy/cr.yaml
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:dev-latest
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:dev-latest"' -
logger.go:42: 21:43:22 | demand-backup/2-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
logger.go:42: 21:43:23 | demand-backup/2-create-cluster | perconaservermysql.ps.percona.com/demand-backup created
logger.go:42: 21:50:24 | demand-backup/2-create-cluster | test step failed 2-create-cluster
case.go:378: failed in step 2-create-cluster
case.go:380: --- StatefulSet:kuttl-test-apparent-pheasant/demand-backup-mysql
+++ StatefulSet:kuttl-test-apparent-pheasant/demand-backup-mysql
@@ -1,13 +1,32 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
+  annotations:
+    percona.com/last-config-hash: d37ad5285d5f349a1d8cea01178d70f3
+  labels:
+    app.kubernetes.io/component: mysql
+    app.kubernetes.io/instance: demand-backup
+    app.kubernetes.io/managed-by: percona-server-operator
+    app.kubernetes.io/name: percona-server
+    app.kubernetes.io/part-of: percona-server
+  managedFields: '[... elided field over 10 lines long ...]'
  name: demand-backup-mysql
  namespace: kuttl-test-apparent-pheasant
+  ownerReferences:
+  - apiVersion: ps.percona.com/v1alpha1
+    blockOwnerDeletion: true
+    controller: true
+    kind: PerconaServerMySQL
+    name: demand-backup
+    uid: 2c9f65c2-58da-4433-94a3-33a5388e492d
+spec: '[... elided field over 10 lines long ...]'
status:
+  availableReplicas: 0
  collisionCount: 0
-  currentReplicas: 3
+  currentReplicas: 2
+  currentRevision: demand-backup-mysql-59cbbf8ffc
  observedGeneration: 1
-  readyReplicas: 3
-  replicas: 3
-  updatedReplicas: 3
+  replicas: 2
+  updateRevision: demand-backup-mysql-59cbbf8ffc
+  updatedReplicas: 2
case.go:380: resource StatefulSet:kuttl-test-apparent-pheasant/demand-backup-mysql: .status.currentReplicas: value mismatch, expected: 3 != actual: 2
case.go:380: --- PerconaServerMySQL:kuttl-test-apparent-pheasant/demand-backup
+++ PerconaServerMySQL:kuttl-test-apparent-pheasant/demand-backup
@@ -1,22 +1,29 @@
apiVersion: ps.percona.com/v1alpha1
kind: PerconaServerMySQL
metadata:
+  annotations:
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"ps.percona.com/v1alpha1","kind":"PerconaServerMySQL","metadata":{"annotations":{},"finalizers":["percona.com/delete-mysql-pods-in-order"],"name":"demand-backup","namespace":"kuttl-test-apparent-pheasant"},"spec":{"backup":{"enabled":true,"image":"percona/percona-xtrabackup:8.4","imagePullPolicy":"Always","storages":{"aws-s3":{"s3":{"bucket":"operator-testing","credentialsSecret":"aws-s3-secret","prefix":"ps","region":"us-east-1"},"type":"s3","verifyTLS":true},"azure-blob":{"azure":{"containerName":"operator-testing","credentialsSecret":"azure-secret","prefix":"ps"},"type":"azure","verifyTLS":true},"gcp-cs":{"gcs":{"bucket":"operator-testing","credentialsSecret":"gcp-cs-secret","endpointUrl":"https://storage.googleapis.com","prefix":"ps"},"type":"gcs","verifyTLS":true},"minio":{"s3":{"bucket":"operator-testing","credentialsSecret":"minio-secret","endpointUrl":"http://minio-service.kuttl-test-apparent-pheasant:9000","region":"us-east-1"},"type":"s3"},"s3-us-west":{"s3":{"bucket":"S3-BACKUP-BUCKET-NAME-HERE","credentialsSecret":"cluster1-s3-credentials","region":"us-west-2"},"type":"s3","verifyTLS":true}}},"crVersion":"0.9.0","initImage":"perconalab/percona-server-mysql-operator:PR-676-5e3c84d9","mysql":{"affinity":{"antiAffinityTopologyKey":"kubernetes.io/hostname"},"autoRecovery":true,"clusterType":"async","image":"percona/percona-server:8.4","imagePullPolicy":"Always","resources":{"limits":{"memory":"2G"},"requests":{"memory":"1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"2G"}}}}},"orchestrator":{"affinity":{"antiAffinityTopologyKey":"kubernetes.io/hostname"},"enabled":true,"image":"perconalab/percona-server-mysql-operator:main-orchestrator","imagePullPolicy":"Always","resources":{"limits":{"memory":"256M"},"requests":{"memory":"128M"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"1G"}}}}},"pmm":{"enabled":false,"image":"perconalab/pmm-client:dev-latest","imagePullPolicy":"Always","resources":{"requests":{"cpu":"300m","memory":"150M"}},"serverHost":"monitoring-service","serverUser":"admin"},"proxy":{"haproxy":{"affinity":{"antiAffinityTopologyKey":"kubernetes.io/hostname"},"enabled":true,"image":"perconalab/percona-server-mysql-operator:main-haproxy","imagePullPolicy":"Always","resources":{"requests":{"cpu":"600m","memory":"1G"}},"size":3},"router":{"affinity":{"antiAffinityTopologyKey":"kubernetes.io/hostname"},"enabled":false,"image":"percona/percona-mysql-router:8.4","imagePullPolicy":"Always","resources":{"limits":{"memory":"256M"},"requests":{"memory":"256M"}},"size":3}},"secretsName":"test-secrets","sslSecretName":"test-ssl","toolkit":{"image":"perconalab/percona-server-mysql-operator:main-toolkit","imagePullPolicy":"Always"},"updateStrategy":"SmartUpdate","upgradeOptions":{"apply":"disabled","versionServiceEndpoint":"https://check.percona.com"}}}
  finalizers:
  - percona.com/delete-mysql-pods-in-order
+  managedFields: '[... elided field over 10 lines long ...]'
  name: demand-backup
  namespace: kuttl-test-apparent-pheasant
+spec: '[... elided field over 10 lines long ...]'
status:
+  conditions: '[... elided field over 10 lines long ...]'
  haproxy:
    ready: 3
    size: 3
    state: ready
+  host: demand-backup-haproxy.kuttl-test-apparent-pheasant
  mysql:
-    ready: 3
    size: 3
-    state: ready
+    state: initializing
  orchestrator:
    ready: 3
    size: 3
    state: ready
-  state: ready
+  router: {}
+  state: initializing
case.go:380: resource PerconaServerMySQL:kuttl-test-apparent-pheasant/demand-backup: .status.state: value mismatch, expected: ready != actual: initializing
logger.go:42: 21:50:24 | demand-backup | demand-backup events from ns kuttl-test-apparent-pheasant:
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:21 +0000 UTC Normal Pod mysql-client Scheduled Successfully assigned kuttl-test-apparent-pheasant/mysql-client to gke-jen-ps-676-5e3c84d9--default-pool-25961414-97p8 default-scheduler
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:22 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulling Pulling image "percona/percona-server:8.4" kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:27 +0000 UTC Normal ReplicaSet.apps minio-service-847fc8bb8d SuccessfulCreate Created pod: minio-service-847fc8bb8d-5k92w replicaset-controller
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:27 +0000 UTC Normal Job.batch minio-service-post-job SuccessfulCreate Created pod: minio-service-post-job-dchjv job-controller
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:27 +0000 UTC Normal PersistentVolumeClaim minio-service WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:27 +0000 UTC Normal Deployment.apps minio-service ScalingReplicaSet Scaled up replica set minio-service-847fc8bb8d to 1 deployment-controller
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:27 +0000 UTC Normal PersistentVolumeClaim minio-service ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:27 +0000 UTC Normal PersistentVolumeClaim minio-service Provisioning External provisioner is provisioning volume for claim "kuttl-test-apparent-pheasant/minio-service" pd.csi.storage.gke.io_gke-a176d70a9b5b41da927c-39e5-f610-vm_ceeaa126-1b50-42e3-9f4e-c6cb8f246c46
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:28 +0000 UTC Normal Pod minio-service-post-job-dchjv Scheduled Successfully assigned kuttl-test-apparent-pheasant/minio-service-post-job-dchjv to gke-jen-ps-676-5e3c84d9--default-pool-25961414-97p8 default-scheduler
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:28 +0000 UTC Normal Pod minio-service-post-job-dchjv.spec.containers{minio-make-user} Pulling Pulling image "quay.io/minio/mc:RELEASE.2023-09-29T16-41-22Z" kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:31 +0000 UTC Normal Pod minio-service-847fc8bb8d-5k92w Scheduled Successfully assigned kuttl-test-apparent-pheasant/minio-service-847fc8bb8d-5k92w to gke-jen-ps-676-5e3c84d9--default-pool-25961414-97p8 default-scheduler
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:31 +0000 UTC Normal PersistentVolumeClaim minio-service ProvisioningSucceeded Successfully provisioned volume pvc-792561cd-4b5b-425a-bfd5-d9cabfe955cc pd.csi.storage.gke.io_gke-a176d70a9b5b41da927c-39e5-f610-vm_ceeaa126-1b50-42e3-9f4e-c6cb8f246c46
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:38 +0000 UTC Normal Pod minio-service-847fc8bb8d-5k92w SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-792561cd-4b5b-425a-bfd5-d9cabfe955cc" attachdetach-controller
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:40 +0000 UTC Normal Pod minio-service-847fc8bb8d-5k92w.spec.containers{minio} Pulling Pulling image "quay.io/minio/minio:RELEASE.2023-09-30T07-02-29Z" kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:52 +0000 UTC Normal Pod minio-service-847fc8bb8d-5k92w.spec.containers{minio} Pulled Successfully pulled image "quay.io/minio/minio:RELEASE.2023-09-30T07-02-29Z" in 12.553s (12.553s including waiting) kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:52 +0000 UTC Normal Pod minio-service-847fc8bb8d-5k92w.spec.containers{minio} Created Created container minio kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:52 +0000 UTC Normal Pod minio-service-847fc8bb8d-5k92w.spec.containers{minio} Started Started container minio kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:52 +0000 UTC Normal Pod minio-service-post-job-dchjv.spec.containers{minio-make-user} Pulled Successfully pulled image "quay.io/minio/mc:RELEASE.2023-09-29T16-41-22Z" in 23.635s (23.635s including waiting) kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:52 +0000 UTC Normal Pod minio-service-post-job-dchjv.spec.containers{minio-make-user} Created Created container minio-make-user kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:52 +0000 UTC Normal Pod minio-service-post-job-dchjv.spec.containers{minio-make-user} Started Started container minio-make-user kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:52 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Successfully pulled image "percona/percona-server:8.4" in 30.085s (30.085s including waiting) kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:52 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container mysql-client kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:42:52 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:11 +0000 UTC Normal Job.batch minio-service-post-job Completed Job completed job-controller
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:13 +0000 UTC Normal Pod aws-cli Scheduled Successfully assigned kuttl-test-apparent-pheasant/aws-cli to gke-jen-ps-676-5e3c84d9--default-pool-25961414-97p8 default-scheduler
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:13 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Pulling Pulling image "perconalab/awscli" kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:17 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Pulled Successfully pulled image "perconalab/awscli" in 3.085s (3.085s including waiting) kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:17 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Created Created container aws-cli kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:17 +0000 UTC Normal Pod aws-cli.spec.containers{aws-cli} Started Started container aws-cli kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:24 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:24 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:24 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-apparent-pheasant/datadir-demand-backup-mysql-0" pd.csi.storage.gke.io_gke-a176d70a9b5b41da927c-39e5-f610-vm_ceeaa126-1b50-42e3-9f4e-c6cb8f246c46
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:24 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulCreate create Claim datadir-demand-backup-mysql-0 Pod demand-backup-mysql-0 in StatefulSet demand-backup-mysql success statefulset-controller
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:24 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulCreate create Pod demand-backup-mysql-0 in StatefulSet demand-backup-mysql successful statefulset-controller
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:24 +0000 UTC Normal Pod demand-backup-orc-0 Scheduled Successfully assigned kuttl-test-apparent-pheasant/demand-backup-orc-0 to gke-jen-ps-676-5e3c84d9--default-pool-25961414-97p8 default-scheduler
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:24 +0000 UTC Normal StatefulSet.apps demand-backup-orc SuccessfulCreate create Pod demand-backup-orc-0 in StatefulSet demand-backup-orc successful statefulset-controller
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:25 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:25 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" in 132ms (132ms including waiting) kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:25 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orc-init} Created Created container orc-init kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:25 +0000 UTC Normal Pod demand-backup-orc-0.spec.initContainers{orc-init} Started Started container orc-init kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:26 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:26 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 98ms (98ms including waiting) kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:26 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Created Created container orc kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:27 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{orc} Started Started container orc kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:27 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:27 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 105ms (105ms including waiting) kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:27 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Created Created container mysql-monit kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:27 +0000 UTC Normal Pod demand-backup-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:28 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-7e3b1833-299f-41d5-90a7-e5962ffe3439 pd.csi.storage.gke.io_gke-a176d70a9b5b41da927c-39e5-f610-vm_ceeaa126-1b50-42e3-9f4e-c6cb8f246c46
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:28 +0000 UTC Normal Pod demand-backup-mysql-0 Scheduled Successfully assigned kuttl-test-apparent-pheasant/demand-backup-mysql-0 to gke-jen-ps-676-5e3c84d9--default-pool-25961414-nlkq default-scheduler
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:35 +0000 UTC Normal Pod demand-backup-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-7e3b1833-299f-41d5-90a7-e5962ffe3439" attachdetach-controller
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:37 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:37 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" in 168ms (168ms including waiting) kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:37 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Created Created container mysql-init kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:37 +0000 UTC Normal Pod demand-backup-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulling Pulling image "percona/percona-server:8.4" kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "percona/percona-server:8.4" in 112ms (112ms including waiting) kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Created Created container mysql kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "percona/percona-xtrabackup:8.4" kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "percona/percona-xtrabackup:8.4" in 123ms (123ms including waiting) kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Created Created container xtrabackup kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:39 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 145ms (145ms including waiting) kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:43:40 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:00 +0000 UTC Normal Pod demand-backup-orc-1 Scheduled Successfully assigned kuttl-test-apparent-pheasant/demand-backup-orc-1 to gke-jen-ps-676-5e3c84d9--default-pool-25961414-jwxm default-scheduler
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:00 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:00 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" in 190ms (190ms including waiting) kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:00 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Created Created container orc-init kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:00 +0000 UTC Normal Pod demand-backup-orc-1.spec.initContainers{orc-init} Started Started container orc-init kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:00 +0000 UTC Normal StatefulSet.apps demand-backup-orc SuccessfulCreate create Pod demand-backup-orc-1 in StatefulSet demand-backup-orc successful statefulset-controller
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:02 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:03 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 135ms (135ms including waiting) kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:03 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Created Created container orc kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:03 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{orc} Started Started container orc kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:03 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:03 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 109ms (109ms including waiting) kubelet
logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:03 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Created
Created container mysql-monit kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:03 +0000 UTC Normal Pod demand-backup-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:10 +0000 UTC Normal Pod demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 132ms (132ms including waiting) kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:11 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:11 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:11 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-apparent-pheasant/datadir-demand-backup-mysql-1" pd.csi.storage.gke.io_gke-a176d70a9b5b41da927c-39e5-f610-vm_ceeaa126-1b50-42e3-9f4e-c6cb8f246c46 logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:11 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulCreate create Claim datadir-demand-backup-mysql-1 Pod demand-backup-mysql-1 in StatefulSet demand-backup-mysql success statefulset-controller logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:11 +0000 UTC Normal StatefulSet.apps demand-backup-mysql SuccessfulCreate create Pod demand-backup-mysql-1 in StatefulSet demand-backup-mysql successful statefulset-controller logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:15 +0000 UTC Normal PersistentVolumeClaim datadir-demand-backup-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-43aec206-c998-4a15-9baf-6214b193f050 pd.csi.storage.gke.io_gke-a176d70a9b5b41da927c-39e5-f610-vm_ceeaa126-1b50-42e3-9f4e-c6cb8f246c46 logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:15 +0000 UTC Normal Pod demand-backup-mysql-1 Scheduled Successfully assigned kuttl-test-apparent-pheasant/demand-backup-mysql-1 to gke-jen-ps-676-5e3c84d9--default-pool-25961414-97p8 default-scheduler logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:16 +0000 UTC Normal Pod demand-backup-haproxy-0 Scheduled Successfully assigned kuttl-test-apparent-pheasant/demand-backup-haproxy-0 to gke-jen-ps-676-5e3c84d9--default-pool-25961414-97p8 default-scheduler logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:16 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:16 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" in 136ms (136ms including waiting) kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:16 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 21:50:24 | 
demand-backup | 2025-01-15 21:44:16 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:16 +0000 UTC Normal StatefulSet.apps demand-backup-haproxy SuccessfulCreate create Pod demand-backup-haproxy-0 in StatefulSet demand-backup-haproxy successful statefulset-controller logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:18 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:18 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 121ms (121ms including waiting) kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:18 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:18 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:18 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:18 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 137ms (137ms including waiting) kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:18 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:18 +0000 UTC Normal Pod demand-backup-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:19 +0000 UTC Normal Pod demand-backup-haproxy-1 Scheduled Successfully assigned kuttl-test-apparent-pheasant/demand-backup-haproxy-1 to gke-jen-ps-676-5e3c84d9--default-pool-25961414-jwxm default-scheduler logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:19 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:19 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" in 162ms (162ms including waiting) kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:19 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:19 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:19 +0000 UTC Normal StatefulSet.apps demand-backup-haproxy SuccessfulCreate create Pod demand-backup-haproxy-1 in StatefulSet demand-backup-haproxy successful statefulset-controller logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:21 +0000 UTC Warning Pod 
demand-backup-mysql-0.spec.containers{pt-heartbeat} BackOff Back-off restarting failed container pt-heartbeat in pod demand-backup-mysql-0_kuttl-test-apparent-pheasant(43bdafbe-eeac-426d-8137-9348249c18ba) kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:22 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:22 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 125ms (125ms including waiting) kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:22 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:22 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:22 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:22 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 124ms (124ms including waiting) kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:22 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:22 +0000 UTC Normal Pod demand-backup-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:23 +0000 UTC Normal Pod demand-backup-haproxy-2 Scheduled Successfully assigned kuttl-test-apparent-pheasant/demand-backup-haproxy-2 to gke-jen-ps-676-5e3c84d9--default-pool-25961414-nlkq default-scheduler logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:23 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:23 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" in 176ms (176ms including waiting) kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:23 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:23 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:23 +0000 UTC Normal StatefulSet.apps demand-backup-haproxy SuccessfulCreate create Pod demand-backup-haproxy-2 in StatefulSet demand-backup-haproxy successful statefulset-controller logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:23 +0000 UTC Normal Pod demand-backup-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-43aec206-c998-4a15-9baf-6214b193f050" attachdetach-controller logger.go:42: 21:50:24 | demand-backup | 
2025-01-15 21:44:24 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:24 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" in 128ms (128ms including waiting) kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:24 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:24 +0000 UTC Normal Pod demand-backup-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:25 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:25 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 112ms (112ms including waiting) kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:25 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:25 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:25 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:26 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 119ms (119ms including waiting) kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:26 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:26 +0000 UTC Normal Pod demand-backup-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:26 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulling Pulling image "percona/percona-server:8.4" kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:26 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "percona/percona-server:8.4" in 110ms (110ms including waiting) kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:26 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:26 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:26 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "percona/percona-xtrabackup:8.4" kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:34 +0000 UTC Normal Pod 
demand-backup-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 99ms (99ms including waiting) kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:35 +0000 UTC Normal Pod demand-backup-orc-2 Scheduled Successfully assigned kuttl-test-apparent-pheasant/demand-backup-orc-2 to gke-jen-ps-676-5e3c84d9--default-pool-25961414-nlkq default-scheduler logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:35 +0000 UTC Normal StatefulSet.apps demand-backup-orc SuccessfulCreate create Pod demand-backup-orc-2 in StatefulSet demand-backup-orc successful statefulset-controller logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:36 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:36 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" in 149ms (149ms including waiting) kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:36 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orc-init} Created Created container orc-init kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:36 +0000 UTC Normal Pod demand-backup-orc-2.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:37 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:37 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 106ms (106ms including waiting) kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:37 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Created Created container orc kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:37 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{orc} Started Started container orc kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:37 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:38 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 122ms (122ms including waiting) kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:38 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:38 +0000 UTC Normal Pod demand-backup-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:40 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "percona/percona-xtrabackup:8.4" in 13.811s (13.811s including waiting) kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:40 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Created Created container 
xtrabackup kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:40 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:40 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:44 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 4.226s (4.226s including waiting) kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:44 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:44 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:54 +0000 UTC Warning Pod demand-backup-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/01/15 21:44:54 Peers: [3364396136343732.demand-backup-mysql-unready.kuttl-test-apparent-pheasant 3439353634313033.demand-backup-mysql-unready.kuttl-test-apparent-pheasant] 2025/01/15 21:44:54 FQDN: demand-backup-mysql-1.demand-backup-mysql.kuttl-test-apparent-pheasant 2025/01/15 21:44:54 Primary: demand-backup-mysql-0.demand-backup-mysql.kuttl-test-apparent-pheasant Replicas: [demand-backup-mysql-1.demand-backup-mysql.kuttl-test-apparent-pheasant] 2025/01/15 21:44:54 lookup demand-backup-mysql-1 [10.200.32.13] 2025/01/15 21:44:54 PodIP: 10.200.32.13 2025/01/15 21:44:54 bootstrap finished in 0.083152 seconds 2025/01/15 21:44:54 bootstrap failed: get primary IP: lookup demand-backup-mysql-0.demand-backup-mysql.kuttl-test-apparent-pheasant: lookup demand-backup-mysql-0.demand-backup-mysql.kuttl-test-apparent-pheasant on 10.139.160.10:53: no such host kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:55 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:58 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "percona/percona-server:8.4" in 144ms (144ms including waiting) kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:44:59 +0000 UTC Normal Pod demand-backup-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 89ms (89ms including waiting) kubelet logger.go:42: 21:50:24 | demand-backup | 2025-01-15 21:49:25 +0000 UTC Warning Pod demand-backup-mysql-1.spec.containers{mysql} BackOff Back-off restarting failed container mysql in pod demand-backup-mysql-1_kuttl-test-apparent-pheasant(432773b0-7603-48ef-9e26-5b8d15da3dc1) kubelet logger.go:42: 21:50:24 | demand-backup | Deleting namespace: kuttl-test-apparent-pheasant === NAME kuttl harness.go:407: run tests finished harness.go:515: cleaning up harness.go:572: removing temp folder: "" --- FAIL: kuttl (541.03s) --- FAIL: kuttl/harness (0.00s) --- FAIL: kuttl/harness/demand-backup (540.59s) FAIL