=== RUN   kuttl
    harness.go:464: starting setup
    harness.go:255: running tests using configured kubeconfig.
    harness.go:278: Successful connection to cluster at: https://34.42.105.230
    harness.go:363: running tests
    harness.go:75: going to run test suite with timeout of 180 seconds for each step
    harness.go:375: testsuite: e2e-tests/tests has 34 tests
=== RUN   kuttl/harness
=== RUN   kuttl/harness/monitoring
=== PAUSE kuttl/harness/monitoring
=== CONT  kuttl/harness/monitoring
logger.go:42: 22:35:04 | monitoring | Creating namespace: kuttl-test-major-krill
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | starting test step 0-deploy-operator
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
init_temp_dir # do this only in the first TestStep
deploy_operator
deploy_non_tls_cluster_secrets
deploy_tls_cluster_secrets
deploy_client]
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | + source ../../functions
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ realpath ../../..
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-676
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | ++++ pwd
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/tests/monitoring
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | ++ test_name=monitoring
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/vars.sh
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-676
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-676
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/deploy
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/deploy
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/conf
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/conf
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ export GIT_BRANCH=PR-676
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ GIT_BRANCH=PR-676
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ export VERSION=PR-676-5e3c84d9
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ VERSION=PR-676-5e3c84d9
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-676-5e3c84d9
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-676-5e3c84d9
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ export IMAGE_MYSQL=percona/percona-server:8.4
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ IMAGE_MYSQL=percona/percona-server:8.4
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ export IMAGE_BACKUP=percona/percona-xtrabackup:8.4
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ IMAGE_BACKUP=percona/percona-xtrabackup:8.4
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ export IMAGE_ROUTER=percona/percona-mysql-router:8.4
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ IMAGE_ROUTER=percona/percona-mysql-router:8.4
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ export CERT_MANAGER_VER=1.15.1
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ CERT_MANAGER_VER=1.15.1
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | ++++ which gdate
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-676/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | ++++ which date
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ date=/usr/bin/date
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ command -v oc
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ kubectl get nodes
logger.go:42: 22:35:04 | monitoring/0-deploy-operator | +++ grep '^minikube'
logger.go:42: 22:35:05 | monitoring/0-deploy-operator | + init_temp_dir
logger.go:42: 22:35:05 | monitoring/0-deploy-operator | + rm -rf /tmp/kuttl/ps/monitoring
logger.go:42: 22:35:05 | monitoring/0-deploy-operator | + mkdir -p /tmp/kuttl/ps/monitoring
logger.go:42: 22:35:05 | monitoring/0-deploy-operator | + deploy_operator
logger.go:42: 22:35:05 | monitoring/0-deploy-operator | + destroy_operator
logger.go:42: 22:35:05 | monitoring/0-deploy-operator | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
logger.go:42: 22:35:05 | monitoring/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 22:35:05 | monitoring/0-deploy-operator | deployment.apps "percona-server-mysql-operator" force deleted
logger.go:42: 22:35:05 | monitoring/0-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 22:35:05 | monitoring/0-deploy-operator | + kubectl delete namespace ps-operator --force --grace-period=0
logger.go:42: 22:35:06 | monitoring/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 22:35:06 | monitoring/0-deploy-operator | namespace "ps-operator" force deleted
logger.go:42: 22:35:11 | monitoring/0-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 22:35:11 | monitoring/0-deploy-operator | + create_namespace ps-operator
logger.go:42: 22:35:11 | monitoring/0-deploy-operator | + local namespace=ps-operator
logger.go:42: 22:35:11 | monitoring/0-deploy-operator | + [[ -n '' ]]
logger.go:42: 22:35:11 | monitoring/0-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found
logger.go:42: 22:35:12 | monitoring/0-deploy-operator | + kubectl wait --for=delete namespace ps-operator
logger.go:42: 22:35:12 | monitoring/0-deploy-operator | + kubectl create namespace ps-operator
logger.go:42: 22:35:13 | monitoring/0-deploy-operator | namespace/ps-operator created
logger.go:42: 22:35:13 | monitoring/0-deploy-operator | + kubectl -n ps-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-676/deploy/crd.yaml
logger.go:42: 22:35:13 | monitoring/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
logger.go:42: 22:35:14 | monitoring/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
logger.go:42: 22:35:15 | monitoring/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
logger.go:42: 22:35:15 | monitoring/0-deploy-operator | + '[' -n ps-operator ']'
logger.go:42: 22:35:15 | monitoring/0-deploy-operator | + kubectl -n ps-operator apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-676/deploy/cw-rbac.yaml
logger.go:42: 22:35:15 | monitoring/0-deploy-operator | serviceaccount/percona-server-mysql-operator created
logger.go:42: 22:35:16 | monitoring/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 22:35:16 | monitoring/0-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 22:35:16 | monitoring/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 22:35:16 | monitoring/0-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 22:35:16 | monitoring/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"'
logger.go:42: 22:35:16 | monitoring/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"'
logger.go:42: 22:35:16 | monitoring/0-deploy-operator | + kubectl -n ps-operator apply -f -
logger.go:42: 22:35:16 | monitoring/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-676-5e3c84d9
logger.go:42: 22:35:16 | monitoring/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-676-5e3c84d9"' /mnt/jenkins/workspace/cloud-ps-operator_PR-676/deploy/cw-operator.yaml
logger.go:42: 22:35:17 | monitoring/0-deploy-operator | configmap/percona-server-mysql-operator-config created
logger.go:42: 22:35:18 | monitoring/0-deploy-operator | deployment.apps/percona-server-mysql-operator created
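The xtrace above shows how the operator Deployment reaches the cluster: deploy/cw-operator.yaml is rewritten in-memory with yq and piped into kubectl. A condensed sketch of that pipeline, assuming (as the trace does) that document index 1 of the multi-document YAML is the operator Deployment and that ${DEPLOY_DIR} and ${IMAGE} come from vars.sh:

    # Condensed from the xtrace above: disable telemetry, raise the log level,
    # swap in the PR image, then apply everything to the operator namespace.
    yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' "${DEPLOY_DIR}/cw-operator.yaml" \
        | yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"' - \
        | yq eval "select(documentIndex==1).spec.template.spec.containers[0].image=\"${IMAGE}\"" - \
        | kubectl -n ps-operator apply -f -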
logger.go:42: 22:35:18 | monitoring/0-deploy-operator | + deploy_non_tls_cluster_secrets
logger.go:42: 22:35:18 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-major-krill apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/conf/secrets.yaml
logger.go:42: 22:35:19 | monitoring/0-deploy-operator | secret/test-secrets created
logger.go:42: 22:35:19 | monitoring/0-deploy-operator | + deploy_tls_cluster_secrets
logger.go:42: 22:35:19 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-major-krill apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/conf/ssl-secret.yaml
logger.go:42: 22:35:19 | monitoring/0-deploy-operator | secret/test-ssl created
logger.go:42: 22:35:19 | monitoring/0-deploy-operator | + deploy_client
logger.go:42: 22:35:19 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-major-krill apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/conf/client.yaml
logger.go:42: 22:35:20 | monitoring/0-deploy-operator | pod/mysql-client created
logger.go:42: 22:35:21 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 22:35:21 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 22:35:21 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 22:35:22 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 22:35:23 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 22:35:23 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 22:35:24 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 22:35:24 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 22:35:25 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 22:35:26 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 22:35:26 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 22:35:26 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 22:35:28 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 22:35:28 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 22:35:28 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 22:35:29 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 22:35:29 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 22:35:30 | monitoring/0-deploy-operator | INFO Found 1 resource(s).
logger.go:42: 22:35:30 | monitoring/0-deploy-operator | NAME                            NAMESPACE    COL0
logger.go:42: 22:35:30 | monitoring/0-deploy-operator | percona-server-mysql-operator   ps-operator  1
logger.go:42: 22:35:30 | monitoring/0-deploy-operator | ASSERT PASS
logger.go:42: 22:35:30 | monitoring/0-deploy-operator | test step completed 0-deploy-operator
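kuttl re-runs a TestStep command until it succeeds or the step times out, which is why the same `kubectl assert` invocation repeats above until the operator pod reports ready. A rough equivalent without the kubectl-assert plugin (an illustration, not what the suite actually runs):

    # Wait for one ready operator replica with plain kubectl; 180s matches the
    # per-step timeout the harness announced at startup.
    kubectl -n ps-operator wait deployment/percona-server-mysql-operator \
        --for=jsonpath='{.status.readyReplicas}'=1 --timeout=180s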
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | starting test step 1-deploy-pmm-server
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
deploy_pmm_server
sleep 30 # wait for PMM Server to start
API_KEY=$(get_pmm_api_key)
kubectl patch -n "${NAMESPACE}" secret test-secrets --type merge --patch '{"stringData": {"pmmserverkey": '$API_KEY'}}']
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | + source ../../functions
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ realpath ../../..
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-676
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | ++++ pwd
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/tests/monitoring
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | ++ test_name=monitoring
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/vars.sh
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-676
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-676
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/deploy
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/deploy
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/conf
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/conf
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ export GIT_BRANCH=PR-676
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ GIT_BRANCH=PR-676
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ export VERSION=PR-676-5e3c84d9
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ VERSION=PR-676-5e3c84d9
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-676-5e3c84d9
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-676-5e3c84d9
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ export IMAGE_MYSQL=percona/percona-server:8.4
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ IMAGE_MYSQL=percona/percona-server:8.4
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ export IMAGE_BACKUP=percona/percona-xtrabackup:8.4
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ IMAGE_BACKUP=percona/percona-xtrabackup:8.4
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ export IMAGE_ROUTER=percona/percona-mysql-router:8.4
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ IMAGE_ROUTER=percona/percona-mysql-router:8.4
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ export CERT_MANAGER_VER=1.15.1
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ CERT_MANAGER_VER=1.15.1
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | ++++ which gdate
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-676/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | ++++ which date
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ date=/usr/bin/date
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ command -v oc
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ kubectl get nodes
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | +++ grep '^minikube'
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | + deploy_pmm_server
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | + [[ -n '' ]]
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | + helm install monitoring -n kuttl-test-major-krill --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-676/kubeconfig
logger.go:42: 22:35:30 | monitoring/1-deploy-pmm-server | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-676/kubeconfig
logger.go:42: 22:35:32 | monitoring/1-deploy-pmm-server | NAME: monitoring
logger.go:42: 22:35:32 | monitoring/1-deploy-pmm-server | LAST DEPLOYED: Wed Jan 15 22:35:31 2025
logger.go:42: 22:35:32 | monitoring/1-deploy-pmm-server | NAMESPACE: kuttl-test-major-krill
logger.go:42: 22:35:32 | monitoring/1-deploy-pmm-server | STATUS: deployed
logger.go:42: 22:35:32 | monitoring/1-deploy-pmm-server | REVISION: 1
logger.go:42: 22:35:32 | monitoring/1-deploy-pmm-server | TEST SUITE: None
logger.go:42: 22:35:32 | monitoring/1-deploy-pmm-server | NOTES:
logger.go:42: 22:35:32 | monitoring/1-deploy-pmm-server | PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster:
logger.go:42: 22:35:32 | monitoring/1-deploy-pmm-server |
logger.go:42: 22:35:32 | monitoring/1-deploy-pmm-server | endpoint: https://monitoring-service.kuttl-test-major-krill.svc.cluster.local:443
logger.go:42: 22:35:32 | monitoring/1-deploy-pmm-server | login: admin
logger.go:42: 22:35:32 | monitoring/1-deploy-pmm-server | password: admin
logger.go:42: 22:35:32 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-major-krill exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
logger.go:42: 22:35:33 | monitoring/1-deploy-pmm-server | Error from server (BadRequest): pod monitoring-0 does not have a host assigned
logger.go:42: 22:35:33 | monitoring/1-deploy-pmm-server | + echo 'Retry '
logger.go:42: 22:35:33 | monitoring/1-deploy-pmm-server | Retry
logger.go:42: 22:35:33 | monitoring/1-deploy-pmm-server | + sleep 5
logger.go:42: 22:35:38 | monitoring/1-deploy-pmm-server | + let retry+=1
logger.go:42: 22:35:38 | monitoring/1-deploy-pmm-server | + '[' 1 -ge 20 ']'
logger.go:42: 22:35:38 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-major-krill exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
logger.go:42: 22:35:40 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
logger.go:42: 22:35:40 | monitoring/1-deploy-pmm-server | + echo 'Retry 1'
logger.go:42: 22:35:40 | monitoring/1-deploy-pmm-server | Retry 1
logger.go:42: 22:35:40 | monitoring/1-deploy-pmm-server | + sleep 5
logger.go:42: 22:35:45 | monitoring/1-deploy-pmm-server | + let retry+=1
logger.go:42: 22:35:45 | monitoring/1-deploy-pmm-server | + '[' 2 -ge 20 ']'
logger.go:42: 22:35:45 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-major-krill exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
logger.go:42: 22:35:46 | monitoring/1-deploy-pmm-server | command terminated with exit code 1
logger.go:42: 22:35:46 | monitoring/1-deploy-pmm-server | + echo 'Retry 2'
logger.go:42: 22:35:46 | monitoring/1-deploy-pmm-server | Retry 2
logger.go:42: 22:35:46 | monitoring/1-deploy-pmm-server | + sleep 5
logger.go:42: 22:35:51 | monitoring/1-deploy-pmm-server | + let retry+=1
logger.go:42: 22:35:51 | monitoring/1-deploy-pmm-server | + '[' 3 -ge 20 ']'
logger.go:42: 22:35:51 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-major-krill exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
logger.go:42: 22:35:53 | monitoring/1-deploy-pmm-server | + sleep 30
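deploy_pmm_server blocks until PostgreSQL is running inside the PMM Server container; the trace above shows the first three attempts failing while the pod schedules and the container starts. The loop's shape, reconstructed from the xtrace (the actual body lives in e2e-tests/functions, so details may differ):

    # Poll monitoring-0 until a postgres process shows up, at most 20 tries.
    retry=0
    until kubectl -n "${NAMESPACE}" exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null | grep postgres >/dev/null'; do
        echo "Retry ${retry}"
        sleep 5
        let retry+=1
        if [ "${retry}" -ge 20 ]; then
            exit 1 # give up; PMM Server never became ready
        fi
    done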
logger.go:42: 22:36:23 | monitoring/1-deploy-pmm-server | ++ get_pmm_api_key
logger.go:42: 22:36:23 | monitoring/1-deploy-pmm-server | ++ local key_name=
logger.go:42: 22:36:23 | monitoring/1-deploy-pmm-server | ++ [[ -z '' ]]
logger.go:42: 22:36:23 | monitoring/1-deploy-pmm-server | ++ key_name=operator
logger.go:42: 22:36:23 | monitoring/1-deploy-pmm-server | ++ local ADMIN_PASSWORD
logger.go:42: 22:36:23 | monitoring/1-deploy-pmm-server | +++ kubectl -n kuttl-test-major-krill exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2'
logger.go:42: 22:36:24 | monitoring/1-deploy-pmm-server | ++ ADMIN_PASSWORD=admin
logger.go:42: 22:36:24 | monitoring/1-deploy-pmm-server | ++ jq .key
logger.go:42: 22:36:24 | monitoring/1-deploy-pmm-server | +++ get_service_ip monitoring-service
logger.go:42: 22:36:24 | monitoring/1-deploy-pmm-server | +++ local service=monitoring-service
logger.go:42: 22:36:24 | monitoring/1-deploy-pmm-server | +++ kubectl get service/monitoring-service -n kuttl-test-major-krill -o 'jsonpath={.spec.type}'
logger.go:42: 22:36:24 | monitoring/1-deploy-pmm-server | +++ grep -q NotFound
logger.go:42: 22:36:25 | monitoring/1-deploy-pmm-server | ++++ kubectl get service/monitoring-service -n kuttl-test-major-krill -o 'jsonpath={.spec.type}'
logger.go:42: 22:36:25 | monitoring/1-deploy-pmm-server | +++ '[' LoadBalancer = ClusterIP ']'
logger.go:42: 22:36:25 | monitoring/1-deploy-pmm-server | +++ kubectl get service/monitoring-service -n kuttl-test-major-krill -o 'jsonpath={.status.loadBalancer.ingress[]}'
logger.go:42: 22:36:25 | monitoring/1-deploy-pmm-server | +++ egrep -q 'hostname|ip'
logger.go:42: 22:36:25 | monitoring/1-deploy-pmm-server | +++ kubectl get service/monitoring-service -n kuttl-test-major-krill -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
logger.go:42: 22:36:26 | monitoring/1-deploy-pmm-server | +++ kubectl get service/monitoring-service -n kuttl-test-major-krill -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
logger.go:42: 22:36:26 | monitoring/1-deploy-pmm-server | ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@34.134.45.128/graph/api/auth/keys
logger.go:42: 22:36:26 | monitoring/1-deploy-pmm-server |   % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
logger.go:42: 22:36:26 | monitoring/1-deploy-pmm-server |                                  Dload  Upload   Total   Spent    Left  Speed
logger.go:42: 22:36:42 | monitoring/1-deploy-pmm-server | 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:01 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:02 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:03 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:04 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:05 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:06 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:07 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:08 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:09 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:10 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:11 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:12 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:13 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:14 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:15 --:--:-- 0 100 155 100 119 100 36 7 2 0:00:18 0:00:15 0:00:03 34
logger.go:42: 22:36:42 | monitoring/1-deploy-pmm-server | + API_KEY='"eyJrIjoiVjh5enU1MDE5U3FFSVJmZHJaajhENnZCYmoycVhYSWQiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="'
logger.go:42: 22:36:42 | monitoring/1-deploy-pmm-server | + kubectl patch -n kuttl-test-major-krill secret test-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoiVjh5enU1MDE5U3FFSVJmZHJaajhENnZCYmoycVhYSWQiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}'
logger.go:42: 22:36:42 | monitoring/1-deploy-pmm-server | secret/test-secrets patched
[controller-runtime] log.SetLogger(...) was never called; logs will not be displayed. Detected at:
> goroutine 12 [running]:
> runtime/debug.Stack()
> 	/nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/runtime/debug/stack.go:24 +0x5e
> sigs.k8s.io/controller-runtime/pkg/log.eventuallyFulfillRoot()
> 	/home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/log.go:60 +0xcd
> sigs.k8s.io/controller-runtime/pkg/log.(*delegatingLogSink).WithName(0xc000359c00, {0x184a055, 0x14})
> 	/home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/deleg.go:147 +0x3e
> github.com/go-logr/logr.Logger.WithName({{0x1acb7d8, 0xc000359c00}, 0x0}, {0x184a055?, 0xc00069df80?})
> 	/home/mowsiany/go/pkg/mod/github.com/go-logr/logr@v1.2.4/logr.go:336 +0x36
> sigs.k8s.io/controller-runtime/pkg/client.newClient(0x131ead3?, {0x0, 0xc0004c89a0, {0x1accd90, 0xc000358040}, 0x0, {0x0, 0x0}, 0x0})
> 	/home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:122 +0xf1
> sigs.k8s.io/controller-runtime/pkg/client.New(0xc00063a908?, {0x0, 0xc0004c89a0, {0x1accd90, 0xc000358040}, 0x0, {0x0, 0x0}, 0x0})
> 	/home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:103 +0x7d
> github.com/kudobuilder/kuttl/pkg/test/utils.NewRetryClient(0xc00063a908, {0x0, 0xc0004c89a0, {0x1accd90, 0xc000358040}, 0x0, {0x0, 0x0}, 0x0})
> 	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/utils/kubernetes.go:177 +0x127
> github.com/kudobuilder/kuttl/pkg/test.(*Harness).Client(0xc000030308, 0x3?)
> 	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:323 +0x18e
> github.com/kudobuilder/kuttl/pkg/test.(*Step).Create(0xc000613860, 0xc000640340, {0xc00059daa0, 0x16})
> 	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:177 +0x63
> github.com/kudobuilder/kuttl/pkg/test.(*Step).Run(0xc000613860, 0xc000640340, {0xc00059daa0, 0x16})
> 	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:457 +0x24a
> github.com/kudobuilder/kuttl/pkg/test.(*Case).Run(0xc0004c30e0, 0xc000640340, 0xc000146630)
> 	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/case.go:373 +0xaeb
> github.com/kudobuilder/kuttl/pkg/test.(*Harness).RunTests.func1.1(0xc000640340)
> 	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:401 +0x12e
> testing.tRunner(0xc000640340, 0xc000514b88)
> 	/nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1689 +0xfb
> created by testing.(*T).Run in goroutine 11
> 	/nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1742 +0x390
logger.go:42: 22:36:43 | monitoring/1-deploy-pmm-server | test step completed 1-deploy-pmm-server
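Step 1 boils down to minting a Grafana API key on the freshly exposed PMM Server and storing it where the operator expects it. Condensed from the trace (LoadBalancer IP and admin credentials as resolved above; note that `jq .key` keeps the surrounding double quotes, which is why $API_KEY is spliced into the patch unquoted):

    # Create an Admin-role API key via Grafana's auth API, then stash it in
    # the test-secrets Secret under the key the operator reads (pmmserverkey).
    API_KEY=$(curl --insecure -X POST -H 'Content-Type: application/json' \
        -d '{"name":"operator", "role": "Admin"}' \
        "https://admin:admin@34.134.45.128/graph/api/auth/keys" | jq .key)
    kubectl patch -n "${NAMESPACE}" secret test-secrets --type merge \
        --patch '{"stringData": {"pmmserverkey": '$API_KEY'}}'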
logger.go:42: 22:36:43 | monitoring/2-create-cluster | starting test step 2-create-cluster
logger.go:42: 22:36:43 | monitoring/2-create-cluster | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
get_cr \
  | yq eval '.spec.mysql.clusterType="async"' - \
  | yq eval '.spec.pmm.enabled = true' - \
  | yq eval '.spec.proxy.haproxy.enabled = true' - \
  | yq eval '.spec.proxy.haproxy.expose.type = "LoadBalancer"' - \
  | kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + source ../../functions
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ realpath ../../..
logger.go:42: 22:36:43 | monitoring/2-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-676
logger.go:42: 22:36:43 | monitoring/2-create-cluster | ++++ pwd
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/tests/monitoring
logger.go:42: 22:36:43 | monitoring/2-create-cluster | ++ test_name=monitoring
logger.go:42: 22:36:43 | monitoring/2-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/vars.sh
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-676
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-676
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/deploy
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/deploy
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/conf
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-676/e2e-tests/conf
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
logger.go:42: 22:36:43 | monitoring/2-create-cluster | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ export GIT_BRANCH=PR-676
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ GIT_BRANCH=PR-676
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ export VERSION=PR-676-5e3c84d9
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ VERSION=PR-676-5e3c84d9
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-676-5e3c84d9
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-676-5e3c84d9
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ export IMAGE_MYSQL=percona/percona-server:8.4
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ IMAGE_MYSQL=percona/percona-server:8.4
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ export IMAGE_BACKUP=percona/percona-xtrabackup:8.4
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ IMAGE_BACKUP=percona/percona-xtrabackup:8.4
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ export IMAGE_ROUTER=percona/percona-mysql-router:8.4
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ IMAGE_ROUTER=percona/percona-mysql-router:8.4
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ export CERT_MANAGER_VER=1.15.1
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ CERT_MANAGER_VER=1.15.1
logger.go:42: 22:36:43 | monitoring/2-create-cluster | ++++ which gdate
logger.go:42: 22:36:43 | monitoring/2-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-676/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 22:36:43 | monitoring/2-create-cluster | ++++ which date
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ date=/usr/bin/date
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ command -v oc
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ kubectl get nodes
logger.go:42: 22:36:43 | monitoring/2-create-cluster | +++ grep '^minikube'
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + yq eval '.spec.pmm.enabled = true' -
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + yq eval '.spec.proxy.haproxy.enabled = true' -
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + get_cr
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + local name_suffix=
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + yq eval '.spec.secretsName="test-secrets"' -
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' -
logger.go:42: 22:36:43 | monitoring/2-create-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-676-5e3c84d9
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-676-5e3c84d9"' -
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + yq eval '.spec.proxy.haproxy.expose.type = "LoadBalancer"' -
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + '[' -n '' ']'
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + kubectl -n kuttl-test-major-krill apply -f -
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + yq eval -
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 22:36:43 | monitoring/2-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:dev-latest
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:dev-latest"' -
logger.go:42: 22:36:43 | monitoring/2-create-cluster | ++ printf '.metadata.name="%s"' monitoring
logger.go:42: 22:36:43 | monitoring/2-create-cluster | ++ printf '.spec.proxy.router.image="%s"' percona/percona-mysql-router:8.4
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + yq eval '.metadata.name="monitoring"' /mnt/jenkins/workspace/cloud-ps-operator_PR-676/deploy/cr.yaml
logger.go:42: 22:36:43 | monitoring/2-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
logger.go:42: 22:36:43 | monitoring/2-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + yq eval '.spec.proxy.router.image="percona/percona-mysql-router:8.4"' -
logger.go:42: 22:36:43 | monitoring/2-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
logger.go:42: 22:36:43 | monitoring/2-create-cluster | ++ printf '.spec.backup.image="%s"' percona/percona-xtrabackup:8.4
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + yq eval '.spec.backup.image="percona/percona-xtrabackup:8.4"' -
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' -
logger.go:42: 22:36:43 | monitoring/2-create-cluster | ++ printf '.spec.mysql.image="%s"' percona/percona-server:8.4
logger.go:42: 22:36:43 | monitoring/2-create-cluster | + yq eval '.spec.mysql.image="percona/percona-server:8.4"' -
logger.go:42: 22:36:45 | monitoring/2-create-cluster | perconaservermysql.ps.percona.com/monitoring created
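get_cr assembles the PerconaServerMySQL custom resource by layering yq edits over deploy/cr.yaml before the step's own overrides are piped in. A condensed, reordered sketch of what the interleaved xtrace above performs (the per-component image overrides from vars.sh are omitted for brevity):

    # Rename the CR, point it at the test secrets, switch to async replication
    # with PMM and a LoadBalancer-exposed HAProxy, then apply it.
    yq eval '.metadata.name="monitoring"' "${DEPLOY_DIR}/cr.yaml" \
        | yq eval '.spec.secretsName="test-secrets"' - \
        | yq eval '.spec.sslSecretName="test-ssl"' - \
        | yq eval '.spec.upgradeOptions.apply="disabled"' - \
        | yq eval '.spec.mysql.clusterType="async"' - \
        | yq eval '.spec.pmm.enabled = true' - \
        | yq eval '.spec.proxy.haproxy.enabled = true' - \
        | yq eval '.spec.proxy.haproxy.expose.type = "LoadBalancer"' - \
        | kubectl -n "${NAMESPACE}" apply -f -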
logger.go:42: 22:45:05 | monitoring/2-create-cluster | test step failed 2-create-cluster
case.go:378: failed in step 2-create-cluster
case.go:380: --- StatefulSet:kuttl-test-major-krill/monitoring-mysql
+++ StatefulSet:kuttl-test-major-krill/monitoring-mysql
@@ -1,15 +1,179 @@
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
+  annotations:
+    percona.com/last-config-hash: dd6cdf82c16767f9457ad922f4c66429
+  labels:
+    app.kubernetes.io/component: mysql
+    app.kubernetes.io/instance: monitoring
+    app.kubernetes.io/managed-by: percona-server-operator
+    app.kubernetes.io/name: percona-server
+    app.kubernetes.io/part-of: percona-server
+  managedFields: '[... elided field over 10 lines long ...]'
   name: monitoring-mysql
   namespace: kuttl-test-major-krill
+  ownerReferences:
+  - apiVersion: ps.percona.com/v1alpha1
+    blockOwnerDeletion: true
+    controller: true
+    kind: PerconaServerMySQL
+    name: monitoring
+    uid: 710dcdff-5f04-4035-96fa-15eb27133dd2
 spec:
+  persistentVolumeClaimRetentionPolicy:
+    whenDeleted: Retain
+    whenScaled: Retain
+  podManagementPolicy: OrderedReady
+  replicas: 3
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      app.kubernetes.io/component: mysql
+      app.kubernetes.io/instance: monitoring
+      app.kubernetes.io/managed-by: percona-server-operator
+      app.kubernetes.io/name: percona-server
+      app.kubernetes.io/part-of: percona-server
+  serviceName: monitoring-mysql
   template:
+    metadata:
+      annotations:
+        percona.com/last-applied-tls: 2346e1a8f581baa1ab808150e6154bb5
+      creationTimestamp: null
+      labels:
+        app.kubernetes.io/component: mysql
+        app.kubernetes.io/instance: monitoring
+        app.kubernetes.io/managed-by: percona-server-operator
+        app.kubernetes.io/name: percona-server
+        app.kubernetes.io/part-of: percona-server
     spec:
+      affinity: '[... elided field over 10 lines long ...]'
       containers:
-      - name: mysql
-      - name: xtrabackup
-      - name: pt-heartbeat
+      - args:
+        - mysqld
+        command:
+        - /opt/percona/ps-entrypoint.sh
+        env:
+        - name: MONITOR_HOST
+          value: '%!'(MISSING)
+        - name: SERVICE_NAME
+          value: monitoring-mysql
+        - name: SERVICE_NAME_UNREADY
+          value: monitoring-mysql-unready
+        - name: CLUSTER_HASH
+          value: "1102117"
+        - name: INNODB_CLUSTER_NAME
+          value: monitoring
+        - name: CR_UID
+          value: 710dcdff-5f04-4035-96fa-15eb27133dd2
+        - name: CLUSTER_TYPE
+          value: async
+        image: percona/percona-server:8.4
+        imagePullPolicy: Always
+        lifecycle:
+          preStop:
+            exec:
+              command:
+              - /opt/percona/ps-pre-stop.sh
+        livenessProbe:
+          exec:
+            command:
+            - /opt/percona/healthcheck
+            - liveness
+          failureThreshold: 3
+          initialDelaySeconds: 15
+          periodSeconds: 10
+          successThreshold: 1
+          timeoutSeconds: 10
+        name: mysql
+        ports:
+        - containerPort: 3306
+          name: mysql
+          protocol: TCP
+        - containerPort: 33062
+          name: mysql-admin
+          protocol: TCP
+        - containerPort: 33060
+          name: mysqlx
+          protocol: TCP
+        readinessProbe:
+          exec:
+            command:
+            - /opt/percona/healthcheck
+            - readiness
+          failureThreshold: 3
+          initialDelaySeconds: 30
+          periodSeconds: 5
+          successThreshold: 1
+          timeoutSeconds: 3
+        resources:
+          limits:
+            memory: 2G
+          requests:
+            memory: 1G
+        startupProbe:
+          exec:
+            command:
+            - /opt/percona/bootstrap
+          failureThreshold: 1
+          initialDelaySeconds: 15
+          periodSeconds: 10
+          successThreshold: 1
+          timeoutSeconds: 43200
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /opt/percona
+          name: bin
+        - mountPath: /var/lib/mysql
+          name: datadir
+        - mountPath: /etc/mysql/mysql-users-secret
+          name: users
+        - mountPath: /etc/mysql/mysql-tls-secret
+          name: tls
+        - mountPath: /etc/mysql/config
+          name: config
+      - command:
+        - /opt/percona/sidecar
+        image: percona/percona-xtrabackup:8.4
+        imagePullPolicy: Always
+        name: xtrabackup
+        ports:
+        - containerPort: 6450
+          name: http
+          protocol: TCP
+        resources: {}
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /opt/percona
+          name: bin
+        - mountPath: /var/lib/mysql
+          name: datadir
+        - mountPath: /etc/mysql/mysql-users-secret
+          name: users
+        - mountPath: /var/log/xtrabackup
+          name: backup-logs
+      - command:
+        - /opt/percona/heartbeat-entrypoint.sh
+        env:
+        - name: HEARTBEAT_PASSWORD
+          valueFrom:
+            secretKeyRef:
+              key: heartbeat
+              name: internal-monitoring
+        image: perconalab/percona-server-mysql-operator:main-toolkit
+        imagePullPolicy: Always
+        name: pt-heartbeat
+        resources: {}
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /opt/percona
+          name: bin
+        - mountPath: /var/lib/mysql
+          name: datadir
+        - mountPath: /etc/mysql/mysql-users-secret
+          name: users
       - env:
         - name: POD_NAME
           valueFrom:
@@ -94,12 +258,53 @@
             name: internal-monitoring
         - name: DB_ARGS
           value: --query-source=perfschema
+        image: perconalab/pmm-client:dev-latest
+        imagePullPolicy: Always
         name: pmm-client
+        ports:
+        - containerPort: 7777
+          protocol: TCP
+        - containerPort: 30100
+          protocol: TCP
+        - containerPort: 30101
+          protocol: TCP
+        - containerPort: 30102
+          protocol: TCP
+        - containerPort: 30103
+          protocol: TCP
+        - containerPort: 30104
+          protocol: TCP
+        - containerPort: 30105
+          protocol: TCP
+        resources:
+          requests:
+            cpu: 300m
+            memory: 150M
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /opt/percona
+          name: bin
+      dnsPolicy: ClusterFirst
+      initContainers: '[... elided field over 10 lines long ...]'
+      restartPolicy: Always
+      schedulerName: default-scheduler
+      securityContext:
+        fsGroup: 1001
+        supplementalGroups:
+        - 1001
+      terminationGracePeriodSeconds: 30
+      volumes: '[... elided field over 10 lines long ...]'
+  updateStrategy:
+    type: OnDelete
+  volumeClaimTemplates: '[... elided field over 10 lines long ...]'
 status:
+  availableReplicas: 0
   collisionCount: 0
   currentReplicas: 3
+  currentRevision: monitoring-mysql-68d8ff589
   observedGeneration: 1
-  readyReplicas: 3
   replicas: 3
+  updateRevision: monitoring-mysql-68d8ff589
   updatedReplicas: 3
case.go:380: resource StatefulSet:kuttl-test-major-krill/monitoring-mysql: .status.readyReplicas: key is missing from map
case.go:380: --- PerconaServerMySQL:kuttl-test-major-krill/monitoring
+++ PerconaServerMySQL:kuttl-test-major-krill/monitoring
@@ -1,18 +1,29 @@
 apiVersion: ps.percona.com/v1alpha1
 kind: PerconaServerMySQL
 metadata:
+  annotations:
+    kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"ps.percona.com/v1alpha1","kind":"PerconaServerMySQL","metadata":{"annotations":{},"finalizers":["percona.com/delete-mysql-pods-in-order"],"name":"monitoring","namespace":"kuttl-test-major-krill"},"spec":{"backup":{"enabled":true,"image":"percona/percona-xtrabackup:8.4","imagePullPolicy":"Always","storages":{"s3-us-west":{"s3":{"bucket":"S3-BACKUP-BUCKET-NAME-HERE","credentialsSecret":"cluster1-s3-credentials","region":"us-west-2"},"type":"s3","verifyTLS":true}}},"crVersion":"0.9.0","initImage":"perconalab/percona-server-mysql-operator:PR-676-5e3c84d9","mysql":{"affinity":{"antiAffinityTopologyKey":"kubernetes.io/hostname"},"autoRecovery":true,"clusterType":"async","image":"percona/percona-server:8.4","imagePullPolicy":"Always","resources":{"limits":{"memory":"2G"},"requests":{"memory":"1G"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"2G"}}}}},"orchestrator":{"affinity":{"antiAffinityTopologyKey":"kubernetes.io/hostname"},"enabled":true,"image":"perconalab/percona-server-mysql-operator:main-orchestrator","imagePullPolicy":"Always","resources":{"limits":{"memory":"256M"},"requests":{"memory":"128M"}},"size":3,"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"1G"}}}}},"pmm":{"enabled":true,"image":"perconalab/pmm-client:dev-latest","imagePullPolicy":"Always","resources":{"requests":{"cpu":"300m","memory":"150M"}},"serverHost":"monitoring-service","serverUser":"admin"},"proxy":{"haproxy":{"affinity":{"antiAffinityTopologyKey":"kubernetes.io/hostname"},"enabled":true,"expose":{"type":"LoadBalancer"},"image":"perconalab/percona-server-mysql-operator:main-haproxy","imagePullPolicy":"Always","resources":{"requests":{"cpu":"600m","memory":"1G"}},"size":3},"router":{"affinity":{"antiAffinityTopologyKey":"kubernetes.io/hostname"},"enabled":false,"image":"percona/percona-mysql-router:8.4","imagePullPolicy":"Always","resources":{"limits":{"memory":"256M"},"requests":{"memory":"256M"}},"size":3}},"secretsName":"test-secrets","sslSecretName":"test-ssl","toolkit":{"image":"perconalab/percona-server-mysql-operator:main-toolkit","imagePullPolicy":"Always"},"updateStrategy":"SmartUpdate","upgradeOptions":{"apply":"disabled","versionServiceEndpoint":"https://check.percona.com"}}} finalizers: - percona.com/delete-mysql-pods-in-order + managedFields: '[... elided field over 10 lines long ...]' name: monitoring namespace: kuttl-test-major-krill +spec: '[... elided field over 10 lines long ...]' status: - mysql: + conditions: '[... 
+  haproxy:
     ready: 3
     size: 3
     state: ready
+  host: 35.223.217.148
+  mysql:
+    size: 3
+    state: initializing
   orchestrator:
     ready: 3
     size: 3
     state: ready
-  state: ready
+  router: {}
+  state: initializing
case.go:380: resource PerconaServerMySQL:kuttl-test-major-krill/monitoring: .status.mysql.state: value mismatch, expected: ready != actual: initializing
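The two diffs above are kuttl comparing the step's assert file against the live objects once the step's wait ran out: the StatefulSet never reported .status.readyReplicas at all, and the CR is stuck at .status.mysql.state=initializing while HAProxy and the orchestrator did reach ready. Assuming the kubeconfig from this run, the failing conditions can be re-checked by hand:

    # Reproduce the two failed checks directly.
    kubectl -n kuttl-test-major-krill get perconaservermysql monitoring \
        -o jsonpath='{.status.mysql.state}'
    kubectl -n kuttl-test-major-krill get statefulset monitoring-mysql \
        -o jsonpath='{.status.readyReplicas}'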
logger.go:42: 22:45:05 | monitoring | monitoring events from ns kuttl-test-major-krill:
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:35:20 +0000 UTC Normal Pod mysql-client Scheduled Successfully assigned kuttl-test-major-krill/mysql-client to gke-jen-ps-676-5e3c84d9--default-pool-886bc0d2-4d43 default-scheduler
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:35:21 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Container image "percona/percona-server:8.4" already present on machine kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:35:21 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container mysql-client kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:35:21 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:35:32 +0000 UTC Normal Service monitoring-service EnsuringLoadBalancer Ensuring load balancer service-controller
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:35:32 +0000 UTC Normal StatefulSet.apps monitoring SuccessfulCreate create Claim pmmdata-monitoring-0 Pod monitoring-0 in StatefulSet monitoring success statefulset-controller
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:35:32 +0000 UTC Normal StatefulSet.apps monitoring SuccessfulCreate create Pod monitoring-0 in StatefulSet monitoring successful statefulset-controller
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:35:32 +0000 UTC Normal PersistentVolumeClaim pmmdata-monitoring-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:35:32 +0000 UTC Normal PersistentVolumeClaim pmmdata-monitoring-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-major-krill/pmmdata-monitoring-0" pd.csi.storage.gke.io_gke-fb9244d32f514c2e92d2-9bf2-748c-vm_726ed6d8-0b4f-45ab-b3fc-81405a0690aa
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:35:32 +0000 UTC Normal PersistentVolumeClaim pmmdata-monitoring-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:35:36 +0000 UTC Normal Pod monitoring-0 Scheduled Successfully assigned kuttl-test-major-krill/monitoring-0 to gke-jen-ps-676-5e3c84d9--default-pool-886bc0d2-c2qc default-scheduler
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:35:36 +0000 UTC Normal PersistentVolumeClaim pmmdata-monitoring-0 ProvisioningSucceeded Successfully provisioned volume pvc-1b384438-d569-4094-aee2-28489d518b39 pd.csi.storage.gke.io_gke-fb9244d32f514c2e92d2-9bf2-748c-vm_726ed6d8-0b4f-45ab-b3fc-81405a0690aa
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:35:44 +0000 UTC Normal Pod monitoring-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-1b384438-d569-4094-aee2-28489d518b39" attachdetach-controller
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:35:45 +0000 UTC Normal Pod monitoring-0.spec.containers{monitoring} Pulling Pulling image "perconalab/pmm-server:dev-latest" kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:35:45 +0000 UTC Normal Pod monitoring-0.spec.containers{monitoring} Pulled Successfully pulled image "perconalab/pmm-server:dev-latest" in 139ms (139ms including waiting) kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:35:45 +0000 UTC Normal Pod monitoring-0.spec.containers{monitoring} Created Created container monitoring kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:35:45 +0000 UTC Normal Pod monitoring-0.spec.containers{monitoring} Started Started container monitoring kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:07 +0000 UTC Normal Service monitoring-service EnsuredLoadBalancer Ensured load balancer service-controller
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:45 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:45 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-major-krill/datadir-monitoring-mysql-0" pd.csi.storage.gke.io_gke-fb9244d32f514c2e92d2-9bf2-748c-vm_726ed6d8-0b4f-45ab-b3fc-81405a0690aa
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:45 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:45 +0000 UTC Normal Service monitoring-haproxy EnsuringLoadBalancer Ensuring load balancer service-controller
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:45 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Claim datadir-monitoring-mysql-0 Pod monitoring-mysql-0 in StatefulSet monitoring-mysql success statefulset-controller
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:45 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Pod monitoring-mysql-0 in StatefulSet monitoring-mysql successful statefulset-controller
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:46 +0000 UTC Normal Pod monitoring-orc-0 Scheduled Successfully assigned kuttl-test-major-krill/monitoring-orc-0 to gke-jen-ps-676-5e3c84d9--default-pool-886bc0d2-g54z default-scheduler
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:46 +0000 UTC Normal Pod monitoring-orc-0.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:46 +0000 UTC Normal StatefulSet.apps monitoring-orc SuccessfulCreate create Pod monitoring-orc-0 in StatefulSet monitoring-orc successful statefulset-controller
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:47 +0000 UTC Normal Pod monitoring-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" in 174ms (174ms including waiting) kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:47 +0000 UTC Normal Pod monitoring-orc-0.spec.initContainers{orc-init} Created Created container orc-init kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:47 +0000 UTC Normal Pod monitoring-orc-0.spec.initContainers{orc-init} Started Started container orc-init kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:48 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:48 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 109ms (109ms including waiting) kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:49 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-18babe5a-d760-4619-b527-12231a3de413 pd.csi.storage.gke.io_gke-fb9244d32f514c2e92d2-9bf2-748c-vm_726ed6d8-0b4f-45ab-b3fc-81405a0690aa
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:49 +0000 UTC Normal Pod monitoring-mysql-0 Scheduled Successfully assigned kuttl-test-major-krill/monitoring-mysql-0 to gke-jen-ps-676-5e3c84d9--default-pool-886bc0d2-4d43 default-scheduler
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:49 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orc} Created Created container orc kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:49 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orc} Started Started container orc kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:49 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 127ms (127ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:49 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:49 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:36:57 +0000 UTC Normal Pod monitoring-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-18babe5a-d760-4619-b527-12231a3de413" attachdetach-controller logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:01 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:01 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" in 214ms (214ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:01 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:01 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:03 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulling Pulling image "percona/percona-server:8.4" kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:03 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "percona/percona-server:8.4" in 104ms (104ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:03 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:03 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:03 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "percona/percona-xtrabackup:8.4" kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:03 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "percona/percona-xtrabackup:8.4" in 155ms (155ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:03 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:03 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:03 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:03 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 125ms (125ms including 
waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:03 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:03 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:03 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:04 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 149ms (149ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:04 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:04 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:13 +0000 UTC Normal Service monitoring-haproxy EnsuredLoadBalancer Ensured load balancer service-controller logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:21 +0000 UTC Normal Pod monitoring-orc-1 Scheduled Successfully assigned kuttl-test-major-krill/monitoring-orc-1 to gke-jen-ps-676-5e3c84d9--default-pool-886bc0d2-c2qc default-scheduler logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:21 +0000 UTC Normal StatefulSet.apps monitoring-orc SuccessfulCreate create Pod monitoring-orc-1 in StatefulSet monitoring-orc successful statefulset-controller logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:22 +0000 UTC Normal Pod monitoring-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:22 +0000 UTC Normal Pod monitoring-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" in 194ms (194ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:22 +0000 UTC Normal Pod monitoring-orc-1.spec.initContainers{orc-init} Created Created container orc-init kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:22 +0000 UTC Normal Pod monitoring-orc-1.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:24 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:25 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 138ms (138ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:25 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orc} Created Created container orc kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:25 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orc} Started Started container orc kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:25 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" 
kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:25 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 135ms (135ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:25 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:25 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:34 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 108ms (109ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:35 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:35 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-major-krill/datadir-monitoring-mysql-1" pd.csi.storage.gke.io_gke-fb9244d32f514c2e92d2-9bf2-748c-vm_726ed6d8-0b4f-45ab-b3fc-81405a0690aa logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:35 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:35 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Claim datadir-monitoring-mysql-1 Pod monitoring-mysql-1 in StatefulSet monitoring-mysql success statefulset-controller logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:35 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Pod monitoring-mysql-1 in StatefulSet monitoring-mysql successful statefulset-controller logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:38 +0000 UTC Normal Pod monitoring-haproxy-0 Scheduled Successfully assigned kuttl-test-major-krill/monitoring-haproxy-0 to gke-jen-ps-676-5e3c84d9--default-pool-886bc0d2-4d43 default-scheduler logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:38 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:38 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" in 160ms (160ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:38 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:38 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:38 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulCreate create Pod monitoring-haproxy-0 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:39 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-bccebede-f3d2-49ff-99aa-ae288b282afd pd.csi.storage.gke.io_gke-fb9244d32f514c2e92d2-9bf2-748c-vm_726ed6d8-0b4f-45ab-b3fc-81405a0690aa logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:39 +0000 UTC Normal Pod monitoring-mysql-1 Scheduled Successfully assigned kuttl-test-major-krill/monitoring-mysql-1 to gke-jen-ps-676-5e3c84d9--default-pool-886bc0d2-g54z default-scheduler logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:40 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:40 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 122ms (122ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:40 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:40 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:40 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:40 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Pulled 
Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 123ms (123ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:40 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:40 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:40 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:40 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 131ms (132ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:40 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:41 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:41 +0000 UTC Normal Pod monitoring-haproxy-1 Scheduled Successfully assigned kuttl-test-major-krill/monitoring-haproxy-1 to gke-jen-ps-676-5e3c84d9--default-pool-886bc0d2-c2qc default-scheduler logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:41 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulCreate create Pod monitoring-haproxy-1 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:42 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:42 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" in 177ms (177ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:42 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:42 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:43 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:44 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 140ms (140ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:44 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:44 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:44 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image 
"perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:44 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 136ms (136ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:44 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:44 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:44 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:44 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 168ms (168ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:44 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:44 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:45 +0000 UTC Normal Pod monitoring-haproxy-2 Scheduled Successfully assigned kuttl-test-major-krill/monitoring-haproxy-2 to gke-jen-ps-676-5e3c84d9--default-pool-886bc0d2-g54z default-scheduler logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:45 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:45 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" in 165ms (165ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:45 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:45 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:45 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulCreate create Pod monitoring-haproxy-2 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:45 +0000 UTC Warning Pod monitoring-mysql-0.spec.containers{pt-heartbeat} BackOff Back-off restarting failed container pt-heartbeat in pod monitoring-mysql-0_kuttl-test-major-krill(a614a857-cc95-4050-9eaa-0b442a3d4c73) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:47 +0000 UTC Normal Pod monitoring-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-bccebede-f3d2-49ff-99aa-ae288b282afd" attachdetach-controller logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:48 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 
22:37:48 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 134ms (134ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:48 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:48 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:48 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:48 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 113ms (113ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:48 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:48 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:48 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:48 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 172ms (172ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:48 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:48 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:48 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:48 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" in 209ms (209ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:48 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:48 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:50 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulling Pulling image "percona/percona-server:8.4" kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:50 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "percona/percona-server:8.4" in 136ms (136ms including waiting) kubelet logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:50 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Created Created container mysql kubelet 
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:50 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:50 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "percona/percona-xtrabackup:8.4" kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:50 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "percona/percona-xtrabackup:8.4" in 108ms (108ms including waiting) kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:50 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Created Created container xtrabackup kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:50 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:50 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:50 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 135ms (135ms including waiting) kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:50 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:50 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:50 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:50 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 170ms (170ms including waiting) kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:51 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Created Created container pmm-client kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:51 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Started Started container pmm-client kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:57 +0000 UTC Normal Pod monitoring-orc-2 Scheduled Successfully assigned kuttl-test-major-krill/monitoring-orc-2 to gke-jen-ps-676-5e3c84d9--default-pool-886bc0d2-4d43 default-scheduler
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:57 +0000 UTC Normal StatefulSet.apps monitoring-orc SuccessfulCreate create Pod monitoring-orc-2 in StatefulSet monitoring-orc successful statefulset-controller
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:58 +0000 UTC Normal Pod monitoring-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:58 +0000 UTC Normal Pod monitoring-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" in 167ms (167ms including waiting) kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:58 +0000 UTC Normal Pod monitoring-orc-2.spec.initContainers{orc-init} Created Created container orc-init kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:37:58 +0000 UTC Normal Pod monitoring-orc-2.spec.initContainers{orc-init} Started Started container orc-init kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:00 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:00 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 120ms (120ms including waiting) kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:00 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orc} Created Created container orc kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:00 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orc} Started Started container orc kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:00 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:00 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 108ms (108ms including waiting) kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:00 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Created Created container mysql-monit kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:00 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:08 +0000 UTC Warning Pod monitoring-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/01/15 22:38:07 Peers: [3637333162633735.monitoring-mysql-unready.kuttl-test-major-krill 6132363164326162.monitoring-mysql-unready.kuttl-test-major-krill] 2025/01/15 22:38:07 FQDN: monitoring-mysql-1.monitoring-mysql.kuttl-test-major-krill 2025/01/15 22:38:08 Primary: monitoring-mysql-0.monitoring-mysql.kuttl-test-major-krill Replicas: [monitoring-mysql-1.monitoring-mysql.kuttl-test-major-krill] 2025/01/15 22:38:08 lookup monitoring-mysql-1 [10.200.130.16] 2025/01/15 22:38:08 PodIP: 10.200.130.16 2025/01/15 22:38:08 lookup monitoring-mysql-0.monitoring-mysql.kuttl-test-major-krill [10.200.129.16] 2025/01/15 22:38:08 PrimaryIP: 10.200.129.16 2025/01/15 22:38:08 Donor: monitoring-mysql-0.monitoring-mysql.kuttl-test-major-krill 2025/01/15 22:38:08 Opening connection to 10.200.130.16 2025/01/15 22:38:08 Clone required: true 2025/01/15 22:38:08 Checking if a clone in progress 2025/01/15 22:38:08 Clone in progress: false 2025/01/15 22:38:08 Cloning from monitoring-mysql-0.monitoring-mysql.kuttl-test-major-krill 2025/01/15 22:38:08 Clone finished. Restarting container... kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:08 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:11 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "percona/percona-server:8.4" in 120ms (120ms including waiting) kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:42 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:42 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:42 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-major-krill/datadir-monitoring-mysql-2" pd.csi.storage.gke.io_gke-fb9244d32f514c2e92d2-9bf2-748c-vm_726ed6d8-0b4f-45ab-b3fc-81405a0690aa
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:42 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Claim datadir-monitoring-mysql-2 Pod monitoring-mysql-2 in StatefulSet monitoring-mysql success statefulset-controller
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:42 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Pod monitoring-mysql-2 in StatefulSet monitoring-mysql successful statefulset-controller
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:46 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-f22260c0-d50c-4e64-a471-18cdc97c0da1 pd.csi.storage.gke.io_gke-fb9244d32f514c2e92d2-9bf2-748c-vm_726ed6d8-0b4f-45ab-b3fc-81405a0690aa
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:46 +0000 UTC Normal Pod monitoring-mysql-2 Scheduled Successfully assigned kuttl-test-major-krill/monitoring-mysql-2 to gke-jen-ps-676-5e3c84d9--default-pool-886bc0d2-c2qc default-scheduler
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:54 +0000 UTC Normal Pod monitoring-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-f22260c0-d50c-4e64-a471-18cdc97c0da1" attachdetach-controller
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:55 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:55 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-676-5e3c84d9" in 188ms (188ms including waiting) kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:55 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Created Created container mysql-init kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:55 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:57 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulling Pulling image "percona/percona-server:8.4" kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:57 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "percona/percona-server:8.4" in 117ms (117ms including waiting) kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:57 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Created Created container mysql kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:57 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:57 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "percona/percona-xtrabackup:8.4" kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:57 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "percona/percona-xtrabackup:8.4" in 136ms (136ms including waiting) kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:57 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Created Created container xtrabackup kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:57 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:57 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:57 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 124ms (125ms including waiting) kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:57 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:57 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:57 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:58 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 196ms (196ms including waiting) kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:58 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Created Created container pmm-client kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:38:58 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Started Started container pmm-client kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:39:15 +0000 UTC Warning Pod monitoring-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/01/15 22:39:14 Peers: [3637333162633735.monitoring-mysql-unready.kuttl-test-major-krill 6132363164326162.monitoring-mysql-unready.kuttl-test-major-krill 6364373239643233.monitoring-mysql-unready.kuttl-test-major-krill] 2025/01/15 22:39:14 FQDN: monitoring-mysql-2.monitoring-mysql.kuttl-test-major-krill 2025/01/15 22:39:15 Primary: monitoring-mysql-0.monitoring-mysql.kuttl-test-major-krill Replicas: [monitoring-mysql-1.monitoring-mysql.kuttl-test-major-krill monitoring-mysql-2.monitoring-mysql.kuttl-test-major-krill] 2025/01/15 22:39:15 lookup monitoring-mysql-2 [10.200.128.37] 2025/01/15 22:39:15 PodIP: 10.200.128.37 2025/01/15 22:39:15 bootstrap finished in 0.084423 seconds 2025/01/15 22:39:15 bootstrap failed: get primary IP: lookup monitoring-mysql-0.monitoring-mysql.kuttl-test-major-krill: lookup monitoring-mysql-0.monitoring-mysql.kuttl-test-major-krill on 10.110.144.10:53: no such host kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:39:15 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:39:18 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "percona/percona-server:8.4" in 127ms (127ms including waiting) kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:42:54 +0000 UTC Warning Pod monitoring-mysql-1.spec.containers{pt-heartbeat} BackOff Back-off restarting failed container pt-heartbeat in pod monitoring-mysql-1_kuttl-test-major-krill(1720f646-93fb-4c2e-930a-e6bbc40c3278) kubelet
logger.go:42: 22:45:05 | monitoring | 2025-01-15 22:43:59 +0000 UTC Warning Pod monitoring-mysql-2.spec.containers{pt-heartbeat} BackOff Back-off restarting failed container pt-heartbeat in pod monitoring-mysql-2_kuttl-test-major-krill(98e280cb-7775-40b3-aa81-57aa8442a864) kubelet
logger.go:42: 22:45:05 | monitoring | Deleting namespace: kuttl-test-major-krill
=== NAME kuttl
    harness.go:407: run tests finished
    harness.go:515: cleaning up
    harness.go:572: removing temp folder: ""
--- FAIL: kuttl (647.96s)
    --- FAIL: kuttl/harness (0.00s)
        --- FAIL: kuttl/harness/monitoring (647.53s)
FAIL