=== RUN kuttl
harness.go:464: starting setup
harness.go:255: running tests using configured kubeconfig.
harness.go:278: Successful connection to cluster at: https://35.188.151.249
harness.go:363: running tests
harness.go:75: going to run test suite with timeout of 180 seconds for each step
harness.go:375: testsuite: e2e-tests/tests has 34 tests
=== RUN kuttl/harness
=== RUN kuttl/harness/monitoring
=== PAUSE kuttl/harness/monitoring
=== CONT kuttl/harness/monitoring
logger.go:42: 08:27:46 | monitoring | Creating namespace: kuttl-test-hopeful-yeti
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | starting test step 0-deploy-operator
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
init_temp_dir # do this only in the first TestStep
deploy_operator
deploy_non_tls_cluster_secrets
deploy_tls_cluster_secrets
deploy_client]
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | + source ../../functions
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ realpath ../../..
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-721
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | ++++ pwd
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/tests/monitoring
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | ++ test_name=monitoring
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/vars.sh
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-721
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-721
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/deploy
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/deploy
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/conf
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/conf
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ export GIT_BRANCH=PR-721
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ GIT_BRANCH=PR-721
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ export VERSION=PR-721-bf839312
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ VERSION=PR-721-bf839312
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-721-bf839312
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-721-bf839312
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ export CERT_MANAGER_VER=1.15.1
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ CERT_MANAGER_VER=1.15.1
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | ++++ which gdate
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-721/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | ++++ which date
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ date=/usr/bin/date
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ command -v oc
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ kubectl get nodes
logger.go:42: 08:27:46 | monitoring/0-deploy-operator | +++ grep '^minikube'
logger.go:42: 08:27:47 | monitoring/0-deploy-operator | + init_temp_dir
logger.go:42: 08:27:47 | monitoring/0-deploy-operator | + rm -rf /tmp/kuttl/ps/monitoring
logger.go:42: 08:27:47 | monitoring/0-deploy-operator | + mkdir -p /tmp/kuttl/ps/monitoring
logger.go:42: 08:27:47 | monitoring/0-deploy-operator | + deploy_operator
logger.go:42: 08:27:47 | monitoring/0-deploy-operator | + destroy_operator
logger.go:42: 08:27:47 | monitoring/0-deploy-operator | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
logger.go:42: 08:27:47 | monitoring/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 08:27:47 | monitoring/0-deploy-operator | Error from server (NotFound): deployments.apps "percona-server-mysql-operator" not found
logger.go:42: 08:27:47 | monitoring/0-deploy-operator | + true
logger.go:42: 08:27:47 | monitoring/0-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 08:27:47 | monitoring/0-deploy-operator | + kubectl delete namespace ps-operator --force --grace-period=0
logger.go:42: 08:27:47 | monitoring/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 08:27:48 | monitoring/0-deploy-operator | Error from server (NotFound): namespaces "ps-operator" not found
logger.go:42: 08:27:48 | monitoring/0-deploy-operator | + true
logger.go:42: 08:27:48 | monitoring/0-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 08:27:48 | monitoring/0-deploy-operator | + create_namespace ps-operator
logger.go:42: 08:27:48 | monitoring/0-deploy-operator | + local namespace=ps-operator
logger.go:42: 08:27:48 | monitoring/0-deploy-operator | + [[ -n '' ]]
logger.go:42: 08:27:48 | monitoring/0-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found
logger.go:42: 08:27:48 | monitoring/0-deploy-operator | + kubectl wait --for=delete namespace ps-operator
logger.go:42: 08:27:48 | monitoring/0-deploy-operator | + kubectl create namespace ps-operator
logger.go:42: 08:27:49 | monitoring/0-deploy-operator | namespace/ps-operator created
logger.go:42: 08:27:49 | monitoring/0-deploy-operator | + kubectl -n ps-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-721/deploy/crd.yaml
logger.go:42: 08:27:50 | monitoring/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
logger.go:42: 08:27:50 | monitoring/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
logger.go:42: 08:27:51 | monitoring/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
logger.go:42: 08:27:51 | monitoring/0-deploy-operator | + '[' -n ps-operator ']'
logger.go:42: 08:27:51 | monitoring/0-deploy-operator | + kubectl -n ps-operator apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-721/deploy/cw-rbac.yaml
logger.go:42: 08:27:52 | monitoring/0-deploy-operator | serviceaccount/percona-server-mysql-operator created
logger.go:42: 08:27:52 | monitoring/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 08:27:52 | monitoring/0-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 08:27:52 | monitoring/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 08:27:52 | monitoring/0-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
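The trace above shows deploy_operator first tearing down any previous install and recreating the operator namespace before applying the CRDs and RBAC. A minimal sketch of that cleanup logic, reconstructed from the xtrace (the real helpers live in e2e-tests/functions and may differ in detail; the `+ true` lines in the trace correspond to the `|| true` guards here):

    destroy_operator() {
        # Force-delete the deployment and namespace; `|| true` keeps
        # `set -o errexit` from aborting when they do not exist yet.
        kubectl -n "${OPERATOR_NS}" delete deployment percona-server-mysql-operator \
            --force --grace-period=0 || true
        if [[ -n ${OPERATOR_NS} ]]; then
            kubectl delete namespace "${OPERATOR_NS}" --force --grace-period=0 || true
        fi
    }

    create_namespace() {
        local namespace=$1
        # Delete any leftover namespace, wait until it is fully gone, then recreate it.
        kubectl delete namespace "${namespace}" --ignore-not-found
        kubectl wait --for=delete namespace "${namespace}"
        kubectl create namespace "${namespace}"
    }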
select(.name=="DISABLE_TELEMETRY").value) = "true"' logger.go:42: 08:27:52 | monitoring/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"' logger.go:42: 08:27:52 | monitoring/0-deploy-operator | + kubectl -n ps-operator apply -f - logger.go:42: 08:27:52 | monitoring/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-721-bf839312 logger.go:42: 08:27:52 | monitoring/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-721-bf839312"' /mnt/jenkins/workspace/cloud-ps-operator_PR-721/deploy/cw-operator.yaml logger.go:42: 08:27:54 | monitoring/0-deploy-operator | configmap/percona-server-mysql-operator-config created logger.go:42: 08:27:54 | monitoring/0-deploy-operator | deployment.apps/percona-server-mysql-operator created logger.go:42: 08:27:54 | monitoring/0-deploy-operator | + deploy_non_tls_cluster_secrets logger.go:42: 08:27:54 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-hopeful-yeti apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/conf/secrets.yaml logger.go:42: 08:27:55 | monitoring/0-deploy-operator | secret/test-secrets created logger.go:42: 08:27:55 | monitoring/0-deploy-operator | + deploy_tls_cluster_secrets logger.go:42: 08:27:55 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-hopeful-yeti apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/conf/ssl-secret.yaml logger.go:42: 08:27:56 | monitoring/0-deploy-operator | secret/test-ssl created logger.go:42: 08:27:56 | monitoring/0-deploy-operator | + deploy_client logger.go:42: 08:27:56 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-hopeful-yeti apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/conf/client.yaml logger.go:42: 08:27:56 | monitoring/0-deploy-operator | pod/mysql-client created logger.go:42: 08:27:57 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 08:27:57 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 08:27:57 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found. logger.go:42: 08:27:58 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 08:27:58 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 08:27:59 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found. logger.go:42: 08:28:00 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 08:28:00 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 08:28:00 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found. 
logger.go:42: 08:27:54 | monitoring/0-deploy-operator | + deploy_non_tls_cluster_secrets
logger.go:42: 08:27:54 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-hopeful-yeti apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/conf/secrets.yaml
logger.go:42: 08:27:55 | monitoring/0-deploy-operator | secret/test-secrets created
logger.go:42: 08:27:55 | monitoring/0-deploy-operator | + deploy_tls_cluster_secrets
logger.go:42: 08:27:55 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-hopeful-yeti apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/conf/ssl-secret.yaml
logger.go:42: 08:27:56 | monitoring/0-deploy-operator | secret/test-ssl created
logger.go:42: 08:27:56 | monitoring/0-deploy-operator | + deploy_client
logger.go:42: 08:27:56 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-hopeful-yeti apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/conf/client.yaml
logger.go:42: 08:27:56 | monitoring/0-deploy-operator | pod/mysql-client created
logger.go:42: 08:27:57 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 08:27:57 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 08:27:57 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 08:27:58 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 08:27:58 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 08:27:59 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 08:28:00 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 08:28:00 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 08:28:00 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 08:28:02 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 08:28:02 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 08:28:02 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 08:28:03 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 08:28:03 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 08:28:04 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 08:28:05 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 08:28:05 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 08:28:05 | monitoring/0-deploy-operator | INFO Found 1 resource(s).
logger.go:42: 08:28:05 | monitoring/0-deploy-operator | NAME                           NAMESPACE    COL0
logger.go:42: 08:28:05 | monitoring/0-deploy-operator | percona-server-mysql-operator ps-operator  1
logger.go:42: 08:28:05 | monitoring/0-deploy-operator | ASSERT PASS
logger.go:42: 08:28:05 | monitoring/0-deploy-operator | test step completed 0-deploy-operator
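The repeated ASSERT FAIL/PASS sequence above is kuttl re-running the same assert command until it passes or the 180-second step timeout expires; here the operator needed roughly eight seconds to report a ready replica. A standalone equivalent of that wait, assuming the kubectl-assert krew plugin is installed (the retry wrapper is illustrative; kuttl drives the retries itself):

    # Poll until the operator deployment reports one ready replica.
    until kubectl assert exist-enhanced deployment percona-server-mysql-operator \
        -n "${OPERATOR_NS:-$NAMESPACE}" --field-selector status.readyReplicas=1; do
        sleep 1
    done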
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | starting test step 1-deploy-pmm-server
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
deploy_pmm_server
sleep 30 # wait for PMM Server to start
API_KEY=$(get_pmm_api_key)
kubectl patch -n "${NAMESPACE}" secret test-secrets --type merge --patch '{"stringData": {"pmmserverkey": '$API_KEY'}}']
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | + source ../../functions
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ realpath ../../..
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-721
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | ++++ pwd
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/tests/monitoring
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | ++ test_name=monitoring
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/vars.sh
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-721
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-721
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/deploy
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/deploy
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/conf
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/conf
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ export GIT_BRANCH=PR-721
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ GIT_BRANCH=PR-721
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ export VERSION=PR-721-bf839312
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ VERSION=PR-721-bf839312
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-721-bf839312
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-721-bf839312
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ export CERT_MANAGER_VER=1.15.1
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ CERT_MANAGER_VER=1.15.1
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | ++++ which gdate
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-721/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | ++++ which date
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ date=/usr/bin/date
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ command -v oc
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ kubectl get nodes
logger.go:42: 08:28:05 | monitoring/1-deploy-pmm-server | +++ grep '^minikube'
logger.go:42: 08:28:06 | monitoring/1-deploy-pmm-server | + deploy_pmm_server
logger.go:42: 08:28:06 | monitoring/1-deploy-pmm-server | + [[ -n '' ]]
logger.go:42: 08:28:06 | monitoring/1-deploy-pmm-server | + helm install monitoring -n kuttl-test-hopeful-yeti --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz
logger.go:42: 08:28:06 | monitoring/1-deploy-pmm-server | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-721/kubeconfig
logger.go:42: 08:28:06 | monitoring/1-deploy-pmm-server | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-721/kubeconfig
logger.go:42: 08:28:11 | monitoring/1-deploy-pmm-server | NAME: monitoring
logger.go:42: 08:28:11 | monitoring/1-deploy-pmm-server | LAST DEPLOYED: Thu Aug 15 08:28:07 2024
logger.go:42: 08:28:11 | monitoring/1-deploy-pmm-server | NAMESPACE: kuttl-test-hopeful-yeti
logger.go:42: 08:28:11 | monitoring/1-deploy-pmm-server | STATUS: deployed
logger.go:42: 08:28:11 | monitoring/1-deploy-pmm-server | REVISION: 1
logger.go:42: 08:28:11 | monitoring/1-deploy-pmm-server | TEST SUITE: None
logger.go:42: 08:28:11 | monitoring/1-deploy-pmm-server | NOTES:
logger.go:42: 08:28:11 | monitoring/1-deploy-pmm-server | PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster:
logger.go:42: 08:28:11 | monitoring/1-deploy-pmm-server |
logger.go:42: 08:28:11 | monitoring/1-deploy-pmm-server | endpoint: https://monitoring-service.kuttl-test-hopeful-yeti.svc.cluster.local:443
logger.go:42: 08:28:11 | monitoring/1-deploy-pmm-server | login: admin
logger.go:42: 08:28:11 | monitoring/1-deploy-pmm-server | password: admin
logger.go:42: 08:28:11 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-hopeful-yeti exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
logger.go:42: 08:28:12 | monitoring/1-deploy-pmm-server | Error from server (BadRequest): pod monitoring-0 does not have a host assigned
logger.go:42: 08:28:12 | monitoring/1-deploy-pmm-server | + echo 'Retry '
logger.go:42: 08:28:12 | monitoring/1-deploy-pmm-server | Retry
logger.go:42: 08:28:12 | monitoring/1-deploy-pmm-server | + sleep 5
logger.go:42: 08:28:17 | monitoring/1-deploy-pmm-server | + let retry+=1
logger.go:42: 08:28:17 | monitoring/1-deploy-pmm-server | + '[' 1 -ge 20 ']'
logger.go:42: 08:28:17 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-hopeful-yeti exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
logger.go:42: 08:28:18 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
logger.go:42: 08:28:18 | monitoring/1-deploy-pmm-server | + echo 'Retry 1'
logger.go:42: 08:28:18 | monitoring/1-deploy-pmm-server | Retry 1
logger.go:42: 08:28:18 | monitoring/1-deploy-pmm-server | + sleep 5
logger.go:42: 08:28:23 | monitoring/1-deploy-pmm-server | + let retry+=1
logger.go:42: 08:28:23 | monitoring/1-deploy-pmm-server | + '[' 2 -ge 20 ']'
logger.go:42: 08:28:23 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-hopeful-yeti exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
logger.go:42: 08:28:24 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
logger.go:42: 08:28:24 | monitoring/1-deploy-pmm-server | + echo 'Retry 2'
logger.go:42: 08:28:24 | monitoring/1-deploy-pmm-server | Retry 2
logger.go:42: 08:28:24 | monitoring/1-deploy-pmm-server | + sleep 5
logger.go:42: 08:28:29 | monitoring/1-deploy-pmm-server | + let retry+=1
logger.go:42: 08:28:29 | monitoring/1-deploy-pmm-server | + '[' 3 -ge 20 ']'
logger.go:42: 08:28:29 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-hopeful-yeti exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
logger.go:42: 08:28:30 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
logger.go:42: 08:28:30 | monitoring/1-deploy-pmm-server | + echo 'Retry 3'
logger.go:42: 08:28:30 | monitoring/1-deploy-pmm-server | Retry 3
logger.go:42: 08:28:30 | monitoring/1-deploy-pmm-server | + sleep 5
logger.go:42: 08:28:35 | monitoring/1-deploy-pmm-server | + let retry+=1
logger.go:42: 08:28:35 | monitoring/1-deploy-pmm-server | + '[' 4 -ge 20 ']'
logger.go:42: 08:28:35 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-hopeful-yeti exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
logger.go:42: 08:28:36 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
logger.go:42: 08:28:36 | monitoring/1-deploy-pmm-server | + echo 'Retry 4'
logger.go:42: 08:28:36 | monitoring/1-deploy-pmm-server | Retry 4
logger.go:42: 08:28:36 | monitoring/1-deploy-pmm-server | + sleep 5
logger.go:42: 08:28:41 | monitoring/1-deploy-pmm-server | + let retry+=1
logger.go:42: 08:28:41 | monitoring/1-deploy-pmm-server | + '[' 5 -ge 20 ']'
logger.go:42: 08:28:41 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-hopeful-yeti exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
logger.go:42: 08:28:42 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
logger.go:42: 08:28:42 | monitoring/1-deploy-pmm-server | + echo 'Retry 5'
logger.go:42: 08:28:42 | monitoring/1-deploy-pmm-server | Retry 5
logger.go:42: 08:28:42 | monitoring/1-deploy-pmm-server | + sleep 5
logger.go:42: 08:28:47 | monitoring/1-deploy-pmm-server | + let retry+=1
logger.go:42: 08:28:47 | monitoring/1-deploy-pmm-server | + '[' 6 -ge 20 ']'
logger.go:42: 08:28:47 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-hopeful-yeti exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
logger.go:42: 08:28:48 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
logger.go:42: 08:28:48 | monitoring/1-deploy-pmm-server | + echo 'Retry 6'
logger.go:42: 08:28:48 | monitoring/1-deploy-pmm-server | Retry 6
logger.go:42: 08:28:48 | monitoring/1-deploy-pmm-server | + sleep 5
logger.go:42: 08:28:53 | monitoring/1-deploy-pmm-server | + let retry+=1
logger.go:42: 08:28:53 | monitoring/1-deploy-pmm-server | + '[' 7 -ge 20 ']'
logger.go:42: 08:28:53 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-hopeful-yeti exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
logger.go:42: 08:28:54 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
logger.go:42: 08:28:54 | monitoring/1-deploy-pmm-server | + echo 'Retry 7'
logger.go:42: 08:28:54 | monitoring/1-deploy-pmm-server | Retry 7
logger.go:42: 08:28:54 | monitoring/1-deploy-pmm-server | + sleep 5
logger.go:42: 08:28:59 | monitoring/1-deploy-pmm-server | + let retry+=1
logger.go:42: 08:28:59 | monitoring/1-deploy-pmm-server | + '[' 8 -ge 20 ']'
logger.go:42: 08:28:59 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-hopeful-yeti exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
logger.go:42: 08:29:00 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
logger.go:42: 08:29:00 | monitoring/1-deploy-pmm-server | + echo 'Retry 8'
logger.go:42: 08:29:00 | monitoring/1-deploy-pmm-server | Retry 8
logger.go:42: 08:29:00 | monitoring/1-deploy-pmm-server | + sleep 5
logger.go:42: 08:29:05 | monitoring/1-deploy-pmm-server | + let retry+=1
logger.go:42: 08:29:05 | monitoring/1-deploy-pmm-server | + '[' 9 -ge 20 ']'
logger.go:42: 08:29:05 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-hopeful-yeti exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
logger.go:42: 08:29:06 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
logger.go:42: 08:29:06 | monitoring/1-deploy-pmm-server | + echo 'Retry 9'
logger.go:42: 08:29:06 | monitoring/1-deploy-pmm-server | Retry 9
logger.go:42: 08:29:06 | monitoring/1-deploy-pmm-server | + sleep 5
logger.go:42: 08:29:11 | monitoring/1-deploy-pmm-server | + let retry+=1
logger.go:42: 08:29:11 | monitoring/1-deploy-pmm-server | + '[' 10 -ge 20 ']'
logger.go:42: 08:29:11 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-hopeful-yeti exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
logger.go:42: 08:29:13 | monitoring/1-deploy-pmm-server | + sleep 30
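The loop above polls the monitoring-0 pod until a postgres process is visible inside it, since the PMM server is only usable once its embedded postgres is up; here it took ten attempts (about 55 seconds). A reconstruction of the loop from the xtrace (the real code lives in deploy_pmm_server in e2e-tests/functions; the give-up action is assumed, only the `-ge 20` bound check appears in the trace):

    retry=0
    # Wait until a postgres process shows up inside the monitoring-0 pod.
    until kubectl -n "${NAMESPACE}" exec monitoring-0 -- \
        bash -c 'ls -l /proc/*/exe 2>/dev/null | grep postgres >/dev/null'; do
        echo "Retry $retry"
        sleep 5
        let retry+=1
        if [ $retry -ge 20 ]; then
            # Abort action assumed; the trace only shows the bound check.
            echo "PMM server did not start in time"
            exit 1
        fi
    done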
logger.go:42: 08:29:43 | monitoring/1-deploy-pmm-server | ++ get_pmm_api_key
logger.go:42: 08:29:43 | monitoring/1-deploy-pmm-server | ++ local key_name=
logger.go:42: 08:29:43 | monitoring/1-deploy-pmm-server | ++ [[ -z '' ]]
logger.go:42: 08:29:43 | monitoring/1-deploy-pmm-server | ++ key_name=operator
logger.go:42: 08:29:43 | monitoring/1-deploy-pmm-server | ++ local ADMIN_PASSWORD
logger.go:42: 08:29:43 | monitoring/1-deploy-pmm-server | +++ kubectl -n kuttl-test-hopeful-yeti exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2'
logger.go:42: 08:29:44 | monitoring/1-deploy-pmm-server | ++ ADMIN_PASSWORD=admin
logger.go:42: 08:29:44 | monitoring/1-deploy-pmm-server | ++ jq .key
logger.go:42: 08:29:44 | monitoring/1-deploy-pmm-server | +++ get_service_ip monitoring-service
logger.go:42: 08:29:44 | monitoring/1-deploy-pmm-server | +++ local service=monitoring-service
logger.go:42: 08:29:44 | monitoring/1-deploy-pmm-server | +++ kubectl get service/monitoring-service -n kuttl-test-hopeful-yeti -o 'jsonpath={.spec.type}'
logger.go:42: 08:29:44 | monitoring/1-deploy-pmm-server | +++ grep -q NotFound
logger.go:42: 08:29:44 | monitoring/1-deploy-pmm-server | ++++ kubectl get service/monitoring-service -n kuttl-test-hopeful-yeti -o 'jsonpath={.spec.type}'
logger.go:42: 08:29:45 | monitoring/1-deploy-pmm-server | +++ '[' LoadBalancer = ClusterIP ']'
logger.go:42: 08:29:45 | monitoring/1-deploy-pmm-server | +++ kubectl get service/monitoring-service -n kuttl-test-hopeful-yeti -o 'jsonpath={.status.loadBalancer.ingress[]}'
logger.go:42: 08:29:45 | monitoring/1-deploy-pmm-server | +++ egrep -q 'hostname|ip'
logger.go:42: 08:29:45 | monitoring/1-deploy-pmm-server | +++ kubectl get service/monitoring-service -n kuttl-test-hopeful-yeti -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
logger.go:42: 08:29:45 | monitoring/1-deploy-pmm-server | +++ kubectl get service/monitoring-service -n kuttl-test-hopeful-yeti -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
logger.go:42: 08:29:46 | monitoring/1-deploy-pmm-server | ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@34.41.136.180/graph/api/auth/keys
logger.go:42: 08:29:46 | monitoring/1-deploy-pmm-server | % Total % Received % Xferd Average Speed Time Time Time Current
logger.go:42: 08:29:46 | monitoring/1-deploy-pmm-server | Dload Upload Total Spent Left Speed
logger.go:42: 08:29:46 | monitoring/1-deploy-pmm-server | 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 155 100 119 100 36 256 77 --:--:-- --:--:-- --:--:-- 334
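The xtrace above expands get_pmm_api_key: it reads the admin password out of the PMM pod, resolves the external address of monitoring-service, and creates a Grafana API key through the /graph/api/auth/keys endpoint. A reconstruction from the trace (the real function lives in e2e-tests/functions):

    get_pmm_api_key() {
        local key_name=${1:-operator}
        local ADMIN_PASSWORD
        ADMIN_PASSWORD=$(kubectl -n "${NAMESPACE}" exec monitoring-0 -- \
            bash -c 'printenv | grep ADMIN_PASSWORD | cut -d = -f2')
        # Create an Admin-role API key in Grafana via the PMM LoadBalancer
        # address and print the key itself.
        curl --insecure -X POST -H 'Content-Type: application/json' \
            -d '{"name":"'${key_name}'", "role": "Admin"}' \
            "https://admin:${ADMIN_PASSWORD}@$(get_service_ip monitoring-service)/graph/api/auth/keys" \
            | jq .key
    }

Note that `jq .key` prints the key still wrapped in double quotes, which is why the step's patch command below can embed $API_KEY into the JSON payload without adding its own quoting.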
logger.go:42: 08:29:46 | monitoring/1-deploy-pmm-server | + API_KEY='"eyJrIjoiWUJ0SFhZdFdSZ0dNSlFIY200N1BSVW85R0JVRUhGWVoiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="'
logger.go:42: 08:29:46 | monitoring/1-deploy-pmm-server | + kubectl patch -n kuttl-test-hopeful-yeti secret test-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoiWUJ0SFhZdFdSZ0dNSlFIY200N1BSVW85R0JVRUhGWVoiLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}'
logger.go:42: 08:29:47 | monitoring/1-deploy-pmm-server | secret/test-secrets patched
[controller-runtime] log.SetLogger(...) was never called; logs will not be displayed. Detected at:
> goroutine 14 [running]:
> runtime/debug.Stack()
> /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/runtime/debug/stack.go:24 +0x5e
> sigs.k8s.io/controller-runtime/pkg/log.eventuallyFulfillRoot()
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/log.go:60 +0xcd
> sigs.k8s.io/controller-runtime/pkg/log.(*delegatingLogSink).WithName(0xc00032dc00, {0x184a055, 0x14})
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/deleg.go:147 +0x3e
> github.com/go-logr/logr.Logger.WithName({{0x1acb7d8, 0xc00032dc00}, 0x0}, {0x184a055?, 0xc000937f80?})
> /home/mowsiany/go/pkg/mod/github.com/go-logr/logr@v1.2.4/logr.go:336 +0x36
> sigs.k8s.io/controller-runtime/pkg/client.newClient(0x131eae8?, {0x0, 0xc0004b8af0, {0x1accd90, 0xc0004066c0}, 0x0, {0x0, 0x0}, 0x0})
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:122 +0xf1
> sigs.k8s.io/controller-runtime/pkg/client.New(0xc0005dbd48?, {0x0, 0xc0004b8af0, {0x1accd90, 0xc0004066c0}, 0x0, {0x0, 0x0}, 0x0})
> /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:103 +0x7d
> github.com/kudobuilder/kuttl/pkg/test/utils.NewRetryClient(0xc0005dbd48, {0x0, 0xc0004b8af0, {0x1accd90, 0xc0004066c0}, 0x0, {0x0, 0x0}, 0x0})
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/utils/kubernetes.go:177 +0x127
> github.com/kudobuilder/kuttl/pkg/test.(*Harness).Client(0xc0000ca608, 0x3?)
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:323 +0x18e
> github.com/kudobuilder/kuttl/pkg/test.(*Step).Create(0xc00019e680, 0xc0000fc4e0, {0xc000049b18, 0x17})
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:177 +0x63
> github.com/kudobuilder/kuttl/pkg/test.(*Step).Run(0xc00019e680, 0xc0000fc4e0, {0xc000049b18, 0x17})
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:457 +0x24a
> github.com/kudobuilder/kuttl/pkg/test.(*Case).Run(0xc000392a00, 0xc0000fc4e0, 0xc00022d680)
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/case.go:373 +0xaeb
> github.com/kudobuilder/kuttl/pkg/test.(*Harness).RunTests.func1.1(0xc0000fc4e0)
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:401 +0x12e
> testing.tRunner(0xc0000fc4e0, 0xc000580b58)
> /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1689 +0xfb
> created by testing.(*T).Run in goroutine 13
> /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1742 +0x390
logger.go:42: 08:29:48 | monitoring/1-deploy-pmm-server | test step completed 1-deploy-pmm-server
logger.go:42: 08:29:48 | monitoring/2-create-cluster | starting test step 2-create-cluster
logger.go:42: 08:29:48 | monitoring/2-create-cluster | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
get_cr \
  | yq eval '.spec.mysql.clusterType="async"' - \
  | yq eval '.spec.pmm.enabled = true' - \
  | yq eval '.spec.proxy.haproxy.enabled = true' - \
  | yq eval '.spec.proxy.haproxy.expose.type = "LoadBalancer"' - \
  | kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + source ../../functions
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ realpath ../../..
logger.go:42: 08:29:48 | monitoring/2-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-721
logger.go:42: 08:29:48 | monitoring/2-create-cluster | ++++ pwd
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/tests/monitoring
logger.go:42: 08:29:48 | monitoring/2-create-cluster | ++ test_name=monitoring
logger.go:42: 08:29:48 | monitoring/2-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/vars.sh
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-721
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-721
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/deploy
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/deploy
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/conf
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/conf
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
logger.go:42: 08:29:48 | monitoring/2-create-cluster | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ export GIT_BRANCH=PR-721
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ GIT_BRANCH=PR-721
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ export VERSION=PR-721-bf839312
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ VERSION=PR-721-bf839312
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-721-bf839312
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-721-bf839312
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ export CERT_MANAGER_VER=1.15.1
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ CERT_MANAGER_VER=1.15.1
logger.go:42: 08:29:48 | monitoring/2-create-cluster | ++++ which gdate
logger.go:42: 08:29:48 | monitoring/2-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-721/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 08:29:48 | monitoring/2-create-cluster | ++++ which date
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ date=/usr/bin/date
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ command -v oc
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ kubectl get nodes
logger.go:42: 08:29:48 | monitoring/2-create-cluster | +++ grep '^minikube'
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + get_cr
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + local name_suffix=
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + yq eval '.spec.pmm.enabled = true' -
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + yq eval '.spec.proxy.haproxy.expose.type = "LoadBalancer"' -
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + yq eval '.spec.proxy.haproxy.enabled = true' -
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + '[' -n '' ']'
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + yq eval -
logger.go:42: 08:29:48 | monitoring/2-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + kubectl -n kuttl-test-hopeful-yeti apply -f -
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + yq eval '.spec.secretsName="test-secrets"' -
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' -
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' -
logger.go:42: 08:29:48 | monitoring/2-create-cluster | ++ printf '.metadata.name="%s"' monitoring
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + yq eval '.metadata.name="monitoring"' /mnt/jenkins/workspace/cloud-ps-operator_PR-721/deploy/cr.yaml
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
logger.go:42: 08:29:48 | monitoring/2-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' -
logger.go:42: 08:29:48 | monitoring/2-create-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-721-bf839312
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-721-bf839312"' -
logger.go:42: 08:29:48 | monitoring/2-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' -
logger.go:42: 08:29:48 | monitoring/2-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' -
logger.go:42: 08:29:48 | monitoring/2-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 08:29:48 | monitoring/2-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:dev-latest
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:dev-latest"' -
logger.go:42: 08:29:48 | monitoring/2-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 08:29:48 | monitoring/2-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
logger.go:42: 08:29:49 | monitoring/2-create-cluster | perconaservermysql.ps.percona.com/monitoring created
logger.go:42: 08:33:14 | monitoring/2-create-cluster | test step completed 2-create-cluster
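Step 2's xtrace interleaves the yq stages of get_cr with the overrides piped in by the step itself. Untangled, get_cr is roughly the following (reconstructed from the trace; the real function in e2e-tests/functions may order the stages differently and handle more options):

    get_cr() {
        local name_suffix=$1
        # Start from deploy/cr.yaml and pin the metadata plus every image to
        # the values exported from vars.sh; each yq stage edits the stream.
        yq eval '.metadata.name="'${test_name}${name_suffix:+-$name_suffix}'"' "${DEPLOY_DIR}/cr.yaml" \
          | yq eval '.spec.secretsName="test-secrets"' - \
          | yq eval '.spec.sslSecretName="test-ssl"' - \
          | yq eval '.spec.upgradeOptions.apply="disabled"' - \
          | yq eval '.spec.initImage="'${IMAGE}'"' - \
          | yq eval '.spec.mysql.image="'${IMAGE_MYSQL}'"' - \
          | yq eval '.spec.backup.image="'${IMAGE_BACKUP}'"' - \
          | yq eval '.spec.orchestrator.image="'${IMAGE_ORCHESTRATOR}'"' - \
          | yq eval '.spec.proxy.router.image="'${IMAGE_ROUTER}'"' - \
          | yq eval '.spec.proxy.haproxy.image="'${IMAGE_HAPROXY}'"' - \
          | yq eval '.spec.toolkit.image="'${IMAGE_TOOLKIT}'"' - \
          | yq eval '.spec.pmm.image="'${IMAGE_PMM_CLIENT}'"' -
    }

The step then layers its own overrides (async cluster type, PMM enabled, HAProxy exposed as a LoadBalancer) on top of that stream before the final kubectl apply, which is why the cluster takes several minutes to reach readiness between 08:29:49 and 08:33:14.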
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | starting test step 3-rotate-pmm-key
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions

# add new PMM API key to secret
API_KEY_NEW=$(get_pmm_api_key "operator-new")
kubectl patch -n "${NAMESPACE}" secret test-secrets --type merge --patch '{"stringData": {"pmmserverkey": '$API_KEY_NEW'}}'

# delete old PMM key
delete_pmm_api_key "operator"
sleep 10]
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | + source ../../functions
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ realpath ../../..
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-721
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | ++++ pwd
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/tests/monitoring
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | ++ test_name=monitoring
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/vars.sh
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-721
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-721
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/deploy
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/deploy
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/conf
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/conf
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ export GIT_BRANCH=PR-721
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ GIT_BRANCH=PR-721
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ export VERSION=PR-721-bf839312
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ VERSION=PR-721-bf839312
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-721-bf839312
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-721-bf839312
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ export CERT_MANAGER_VER=1.15.1
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ CERT_MANAGER_VER=1.15.1
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | ++++ which gdate
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-721/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | ++++ which date
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ date=/usr/bin/date
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ command -v oc
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ kubectl get nodes
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ grep '^minikube'
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | ++ get_pmm_api_key operator-new
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | ++ local key_name=operator-new
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | ++ [[ -z operator-new ]]
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | ++ local ADMIN_PASSWORD
logger.go:42: 08:33:14 | monitoring/3-rotate-pmm-key | +++ kubectl -n kuttl-test-hopeful-yeti exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2'
logger.go:42: 08:33:15 | monitoring/3-rotate-pmm-key | ++ ADMIN_PASSWORD=admin
logger.go:42: 08:33:15 | monitoring/3-rotate-pmm-key | ++ jq .key
logger.go:42: 08:33:15 | monitoring/3-rotate-pmm-key | +++ get_service_ip monitoring-service
logger.go:42: 08:33:15 | monitoring/3-rotate-pmm-key | +++ local service=monitoring-service
logger.go:42: 08:33:15 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-hopeful-yeti -o 'jsonpath={.spec.type}'
logger.go:42: 08:33:15 | monitoring/3-rotate-pmm-key | +++ grep -q NotFound
logger.go:42: 08:33:16 | monitoring/3-rotate-pmm-key | ++++ kubectl get service/monitoring-service -n kuttl-test-hopeful-yeti -o 'jsonpath={.spec.type}'
logger.go:42: 08:33:16 | monitoring/3-rotate-pmm-key | +++ '[' LoadBalancer = ClusterIP ']'
logger.go:42: 08:33:16 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-hopeful-yeti -o 'jsonpath={.status.loadBalancer.ingress[]}'
logger.go:42: 08:33:16 | monitoring/3-rotate-pmm-key | +++ egrep -q 'hostname|ip'
logger.go:42: 08:33:17 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-hopeful-yeti -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
logger.go:42: 08:33:17 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-hopeful-yeti -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
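The get_service_ip trace repeats throughout these steps: it checks that the service exists, tests whether it is a ClusterIP service, waits for the LoadBalancer to publish an ingress, then reads the address. A sketch reconstructed from the trace (the ClusterIP branch body and the wait loop are assumptions; only the probes they explain appear in the xtrace):

    get_service_ip() {
        local service=$1
        # Bail out if the service does not exist at all.
        kubectl get "service/${service}" -n "${NAMESPACE}" -o 'jsonpath={.spec.type}' 2>&1 \
            | grep -q NotFound && return 1
        # ClusterIP services have no external address; assumed to return the cluster IP.
        if [ "$(kubectl get "service/${service}" -n "${NAMESPACE}" -o 'jsonpath={.spec.type}')" = "ClusterIP" ]; then
            kubectl get "service/${service}" -n "${NAMESPACE}" -o 'jsonpath={.spec.clusterIP}'
            return
        fi
        # Wait until the LoadBalancer publishes an ingress, then prefer .ip over .hostname.
        until kubectl get "service/${service}" -n "${NAMESPACE}" -o 'jsonpath={.status.loadBalancer.ingress[]}' \
            | egrep -q 'hostname|ip'; do
            sleep 1
        done
        kubectl get "service/${service}" -n "${NAMESPACE}" -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
        kubectl get "service/${service}" -n "${NAMESPACE}" -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
    }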
logger.go:42: 08:33:18 | monitoring/3-rotate-pmm-key | ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator-new", "role": "Admin"}' https://admin:admin@34.41.136.180/graph/api/auth/keys
logger.go:42: 08:33:18 | monitoring/3-rotate-pmm-key | % Total % Received % Xferd Average Speed Time Time Time Current
logger.go:42: 08:33:18 | monitoring/3-rotate-pmm-key | Dload Upload Total Spent Left Speed
logger.go:42: 08:33:18 | monitoring/3-rotate-pmm-key | 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 167 100 127 100 40 271 85 --:--:-- --:--:-- --:--:-- 356 100 167 100 127 100 40 271 85 --:--:-- --:--:-- --:--:-- 356
logger.go:42: 08:33:18 | monitoring/3-rotate-pmm-key | + API_KEY_NEW='"eyJrIjoiUUtLbk1PRkVDWXlaNEtscVFhQlBNazVSVTYyZ0VNZVciLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9"'
logger.go:42: 08:33:18 | monitoring/3-rotate-pmm-key | + kubectl patch -n kuttl-test-hopeful-yeti secret test-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoiUUtLbk1PRkVDWXlaNEtscVFhQlBNazVSVTYyZ0VNZVciLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9"}}'
logger.go:42: 08:33:19 | monitoring/3-rotate-pmm-key | secret/test-secrets patched
logger.go:42: 08:33:19 | monitoring/3-rotate-pmm-key | + delete_pmm_api_key operator
logger.go:42: 08:33:19 | monitoring/3-rotate-pmm-key | + local key_name=operator
logger.go:42: 08:33:19 | monitoring/3-rotate-pmm-key | + [[ -z operator ]]
logger.go:42: 08:33:19 | monitoring/3-rotate-pmm-key | + local ADMIN_PASSWORD
logger.go:42: 08:33:19 | monitoring/3-rotate-pmm-key | ++ kubectl -n kuttl-test-hopeful-yeti exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2'
logger.go:42: 08:33:20 | monitoring/3-rotate-pmm-key | + ADMIN_PASSWORD=admin
logger.go:42: 08:33:20 | monitoring/3-rotate-pmm-key | + local key_id
logger.go:42: 08:33:20 | monitoring/3-rotate-pmm-key | ++ jq '.[] | select( .name == "operator").id'
logger.go:42: 08:33:20 | monitoring/3-rotate-pmm-key | +++ get_service_ip monitoring-service
logger.go:42: 08:33:20 | monitoring/3-rotate-pmm-key | +++ local service=monitoring-service
logger.go:42: 08:33:20 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-hopeful-yeti -o 'jsonpath={.spec.type}'
logger.go:42: 08:33:20 | monitoring/3-rotate-pmm-key | +++ grep -q NotFound
logger.go:42: 08:33:20 | monitoring/3-rotate-pmm-key | ++++ kubectl get service/monitoring-service -n kuttl-test-hopeful-yeti -o 'jsonpath={.spec.type}'
logger.go:42: 08:33:21 | monitoring/3-rotate-pmm-key | +++ '[' LoadBalancer = ClusterIP ']'
logger.go:42: 08:33:21 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-hopeful-yeti -o 'jsonpath={.status.loadBalancer.ingress[]}'
logger.go:42: 08:33:21 | monitoring/3-rotate-pmm-key | +++ egrep -q 'hostname|ip'
logger.go:42: 08:33:21 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-hopeful-yeti -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
logger.go:42: 08:33:21 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-hopeful-yeti -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
logger.go:42: 08:33:22 | monitoring/3-rotate-pmm-key | ++ curl --insecure -X GET https://admin:admin@34.41.136.180/graph/api/auth/keys
https://admin:admin@34.41.136.180/graph/api/auth/keys logger.go:42: 08:33:22 | monitoring/3-rotate-pmm-key | % Total % Received % Xferd Average Speed Time Time Time Current logger.go:42: 08:33:22 | monitoring/3-rotate-pmm-key | Dload Upload Total Spent Left Speed logger.go:42: 08:33:22 | monitoring/3-rotate-pmm-key | 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 89 100 89 0 0 198 0 --:--:-- --:--:-- --:--:-- 198 logger.go:42: 08:33:22 | monitoring/3-rotate-pmm-key | + key_id=1 logger.go:42: 08:33:22 | monitoring/3-rotate-pmm-key | ++ get_service_ip monitoring-service logger.go:42: 08:33:22 | monitoring/3-rotate-pmm-key | ++ local service=monitoring-service logger.go:42: 08:33:22 | monitoring/3-rotate-pmm-key | ++ kubectl get service/monitoring-service -n kuttl-test-hopeful-yeti -o 'jsonpath={.spec.type}' logger.go:42: 08:33:22 | monitoring/3-rotate-pmm-key | ++ grep -q NotFound logger.go:42: 08:33:23 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-hopeful-yeti -o 'jsonpath={.spec.type}' logger.go:42: 08:33:23 | monitoring/3-rotate-pmm-key | ++ '[' LoadBalancer = ClusterIP ']' logger.go:42: 08:33:23 | monitoring/3-rotate-pmm-key | ++ kubectl get service/monitoring-service -n kuttl-test-hopeful-yeti -o 'jsonpath={.status.loadBalancer.ingress[]}' logger.go:42: 08:33:23 | monitoring/3-rotate-pmm-key | ++ egrep -q 'hostname|ip' logger.go:42: 08:33:23 | monitoring/3-rotate-pmm-key | ++ kubectl get service/monitoring-service -n kuttl-test-hopeful-yeti -o 'jsonpath={.status.loadBalancer.ingress[].ip}' logger.go:42: 08:33:24 | monitoring/3-rotate-pmm-key | ++ kubectl get service/monitoring-service -n kuttl-test-hopeful-yeti -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' logger.go:42: 08:33:24 | monitoring/3-rotate-pmm-key | + curl --insecure -X DELETE https://admin:admin@34.41.136.180/graph/api/auth/keys/1 logger.go:42: 08:33:24 | monitoring/3-rotate-pmm-key | % Total % Received % Xferd Average Speed Time Time Time Current logger.go:42: 08:33:24 | monitoring/3-rotate-pmm-key | Dload Upload Total Spent Left Speed logger.go:42: 08:33:24 | monitoring/3-rotate-pmm-key | 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 29 100 29 0 0 62 0 --:--:-- --:--:-- --:--:-- 62 logger.go:42: 08:33:24 | monitoring/3-rotate-pmm-key | {"message":"API key deleted"}+ sleep 10 logger.go:42: 08:37:38 | monitoring/3-rotate-pmm-key | test step completed 3-rotate-pmm-key logger.go:42: 08:37:38 | monitoring/4-check-metrics | starting test step 4-check-metrics logger.go:42: 08:37:38 | monitoring/4-check-metrics | running command: [sh -c set -o errexit set -o xtrace source ../../functions sleep 70 # we should wait more than one minute because `get_metric_values` gets data for the last 60 seconds API_KEY=$(kubectl get secret internal-monitoring -o jsonpath='{.data.pmmserverkey}' -n "${NAMESPACE}" | base64 --decode) for i in $(seq 0 2); do get_metric_values node_boot_time_seconds ${NAMESPACE}-$(get_cluster_name)-mysql-${i} api_key:$API_KEY get_metric_values mysql_global_status_uptime ${NAMESPACE}-$(get_cluster_name)-mysql-${i} api_key:$API_KEY done sleep 90 # wait for QAN get_qan20_values monitoring-mysql-0 api_key:$API_KEY haproxy_svc=$(get_service_ip "monitoring-haproxy") http_code=$(curl -s -o /dev/null -w "%{http_code}" http://${haproxy_svc}:8404/metrics) if [[ $http_code != 200 ]]; then echo "Error: http code is $http_code" exit 1 fi] logger.go:42: 08:37:38 | 
monitoring/4-check-metrics | + source ../../functions logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ realpath ../../.. logger.go:42: 08:37:38 | monitoring/4-check-metrics | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-721 logger.go:42: 08:37:38 | monitoring/4-check-metrics | ++++ pwd logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/tests/monitoring logger.go:42: 08:37:38 | monitoring/4-check-metrics | ++ test_name=monitoring logger.go:42: 08:37:38 | monitoring/4-check-metrics | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/vars.sh logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-721 logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-721 logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/deploy logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/deploy logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/conf logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/conf logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 08:37:38 | monitoring/4-check-metrics | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ export GIT_BRANCH=PR-721 logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ GIT_BRANCH=PR-721 logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ export VERSION=PR-721-bf839312 logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ VERSION=PR-721-bf839312 logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-721-bf839312 logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-721-bf839312 logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ 
IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ export CERT_MANAGER_VER=1.15.1 logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ CERT_MANAGER_VER=1.15.1 logger.go:42: 08:37:38 | monitoring/4-check-metrics | ++++ which gdate logger.go:42: 08:37:38 | monitoring/4-check-metrics | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-721/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 08:37:38 | monitoring/4-check-metrics | ++++ which date logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ date=/usr/bin/date logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ command -v oc logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ kubectl get nodes logger.go:42: 08:37:38 | monitoring/4-check-metrics | +++ grep '^minikube' logger.go:42: 08:37:39 | monitoring/4-check-metrics | + sleep 70 logger.go:42: 08:38:49 | monitoring/4-check-metrics | ++ kubectl get secret internal-monitoring -o 'jsonpath={.data.pmmserverkey}' -n kuttl-test-hopeful-yeti logger.go:42: 08:38:49 | monitoring/4-check-metrics | ++ base64 --decode logger.go:42: 08:38:49 | monitoring/4-check-metrics | + API_KEY=eyJrIjoiUUtLbk1PRkVDWXlaNEtscVFhQlBNazVSVTYyZ0VNZVciLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 08:38:49 | monitoring/4-check-metrics | ++ seq 0 2 logger.go:42: 08:38:49 | monitoring/4-check-metrics | + for i in '$(seq 0 2)' logger.go:42: 08:38:49 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 08:38:49 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-hopeful-yeti get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 08:38:50 | monitoring/4-check-metrics | + get_metric_values node_boot_time_seconds kuttl-test-hopeful-yeti-monitoring-mysql-0 api_key:eyJrIjoiUUtLbk1PRkVDWXlaNEtscVFhQlBNazVSVTYyZ0VNZVciLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 08:38:50 | monitoring/4-check-metrics | + local metric=node_boot_time_seconds logger.go:42: 08:38:50 | monitoring/4-check-metrics | + local instance=kuttl-test-hopeful-yeti-monitoring-mysql-0 logger.go:42: 08:38:50 | monitoring/4-check-metrics | + local user_pass=api_key:eyJrIjoiUUtLbk1PRkVDWXlaNEtscVFhQlBNazVSVTYyZ0VNZVciLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 08:38:50 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute' logger.go:42: 08:38:50 | monitoring/4-check-metrics 
| + local start=1723711070 logger.go:42: 08:38:50 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s logger.go:42: 08:38:50 | monitoring/4-check-metrics | + local end=1723711130 logger.go:42: 08:38:50 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 08:38:51 | monitoring/4-check-metrics | "1723707702" logger.go:42: 08:38:51 | monitoring/4-check-metrics | "1723707702" logger.go:42: 08:38:51 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 08:38:51 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-hopeful-yeti get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 08:38:52 | monitoring/4-check-metrics | + get_metric_values mysql_global_status_uptime kuttl-test-hopeful-yeti-monitoring-mysql-0 api_key:eyJrIjoiUUtLbk1PRkVDWXlaNEtscVFhQlBNazVSVTYyZ0VNZVciLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 08:38:52 | monitoring/4-check-metrics | + local metric=mysql_global_status_uptime logger.go:42: 08:38:52 | monitoring/4-check-metrics | + local instance=kuttl-test-hopeful-yeti-monitoring-mysql-0 logger.go:42: 08:38:52 | monitoring/4-check-metrics | + local user_pass=api_key:eyJrIjoiUUtLbk1PRkVDWXlaNEtscVFhQlBNazVSVTYyZ0VNZVciLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 08:38:52 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute' logger.go:42: 08:38:52 | monitoring/4-check-metrics | + local start=1723711072 logger.go:42: 08:38:52 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s logger.go:42: 08:38:52 | monitoring/4-check-metrics | + local end=1723711132 logger.go:42: 08:38:52 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 08:38:53 | monitoring/4-check-metrics | "183" logger.go:42: 08:38:53 | monitoring/4-check-metrics | "115" logger.go:42: 08:38:53 | monitoring/4-check-metrics | + for i in '$(seq 0 2)' logger.go:42: 08:38:53 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 08:38:53 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-hopeful-yeti get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 08:38:54 | monitoring/4-check-metrics | + get_metric_values node_boot_time_seconds kuttl-test-hopeful-yeti-monitoring-mysql-1 api_key:eyJrIjoiUUtLbk1PRkVDWXlaNEtscVFhQlBNazVSVTYyZ0VNZVciLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 08:38:54 | monitoring/4-check-metrics | + local metric=node_boot_time_seconds logger.go:42: 08:38:54 | monitoring/4-check-metrics | + local instance=kuttl-test-hopeful-yeti-monitoring-mysql-1 logger.go:42: 08:38:54 | monitoring/4-check-metrics | + local user_pass=api_key:eyJrIjoiUUtLbk1PRkVDWXlaNEtscVFhQlBNazVSVTYyZ0VNZVciLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 08:38:54 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute' logger.go:42: 08:38:54 | monitoring/4-check-metrics | + local start=1723711074 logger.go:42: 08:38:54 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s logger.go:42: 08:38:54 | monitoring/4-check-metrics | + local end=1723711134 logger.go:42: 08:38:54 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 08:38:55 | monitoring/4-check-metrics | "1723707701" logger.go:42: 08:38:55 | monitoring/4-check-metrics | "1723707701" logger.go:42: 08:38:55 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 08:38:55 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-hopeful-yeti get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 08:38:55 | monitoring/4-check-metrics | + get_metric_values mysql_global_status_uptime kuttl-test-hopeful-yeti-monitoring-mysql-1 
api_key:eyJrIjoiUUtLbk1PRkVDWXlaNEtscVFhQlBNazVSVTYyZ0VNZVciLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 08:38:55 | monitoring/4-check-metrics | + local metric=mysql_global_status_uptime logger.go:42: 08:38:55 | monitoring/4-check-metrics | + local instance=kuttl-test-hopeful-yeti-monitoring-mysql-1 logger.go:42: 08:38:55 | monitoring/4-check-metrics | + local user_pass=api_key:eyJrIjoiUUtLbk1PRkVDWXlaNEtscVFhQlBNazVSVTYyZ0VNZVciLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 08:38:55 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute' logger.go:42: 08:38:55 | monitoring/4-check-metrics | + local start=1723711075 logger.go:42: 08:38:55 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s logger.go:42: 08:38:55 | monitoring/4-check-metrics | + local end=1723711135 logger.go:42: 08:38:55 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 08:38:57 | monitoring/4-check-metrics | "9" logger.go:42: 08:38:57 | monitoring/4-check-metrics | "286" logger.go:42: 08:38:57 | monitoring/4-check-metrics | + for i in '$(seq 0 2)' logger.go:42: 08:38:57 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 08:38:57 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-hopeful-yeti get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 08:38:57 | monitoring/4-check-metrics | + get_metric_values node_boot_time_seconds kuttl-test-hopeful-yeti-monitoring-mysql-2 api_key:eyJrIjoiUUtLbk1PRkVDWXlaNEtscVFhQlBNazVSVTYyZ0VNZVciLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 08:38:57 | monitoring/4-check-metrics | + local metric=node_boot_time_seconds logger.go:42: 08:38:57 | monitoring/4-check-metrics | + local instance=kuttl-test-hopeful-yeti-monitoring-mysql-2 logger.go:42: 08:38:57 | monitoring/4-check-metrics | + local user_pass=api_key:eyJrIjoiUUtLbk1PRkVDWXlaNEtscVFhQlBNazVSVTYyZ0VNZVciLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 08:38:57 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute' logger.go:42: 08:38:57 | monitoring/4-check-metrics | + local start=1723711077 logger.go:42: 08:38:57 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s logger.go:42: 08:38:57 | monitoring/4-check-metrics | + local end=1723711137 logger.go:42: 08:38:57 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 08:38:59 | monitoring/4-check-metrics | "1723707702" logger.go:42: 08:38:59 | monitoring/4-check-metrics | "1723707702" logger.go:42: 08:38:59 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 08:38:59 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-hopeful-yeti get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 08:38:59 | monitoring/4-check-metrics | + get_metric_values mysql_global_status_uptime kuttl-test-hopeful-yeti-monitoring-mysql-2 api_key:eyJrIjoiUUtLbk1PRkVDWXlaNEtscVFhQlBNazVSVTYyZ0VNZVciLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 08:38:59 | monitoring/4-check-metrics | + local metric=mysql_global_status_uptime logger.go:42: 08:38:59 | monitoring/4-check-metrics | + local instance=kuttl-test-hopeful-yeti-monitoring-mysql-2 logger.go:42: 08:38:59 | monitoring/4-check-metrics | + local user_pass=api_key:eyJrIjoiUUtLbk1PRkVDWXlaNEtscVFhQlBNazVSVTYyZ0VNZVciLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 08:38:59 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute' logger.go:42: 08:38:59 | monitoring/4-check-metrics | + local start=1723711079 logger.go:42: 08:38:59 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s logger.go:42: 08:38:59 | 
monitoring/4-check-metrics | + local end=1723711139 logger.go:42: 08:38:59 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 08:39:01 | monitoring/4-check-metrics | "54" logger.go:42: 08:39:01 | monitoring/4-check-metrics | "221" logger.go:42: 08:39:01 | monitoring/4-check-metrics | + sleep 90 logger.go:42: 08:40:31 | monitoring/4-check-metrics | + get_qan20_values monitoring-mysql-0 api_key:eyJrIjoiUUtLbk1PRkVDWXlaNEtscVFhQlBNazVSVTYyZ0VNZVciLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 08:40:31 | monitoring/4-check-metrics | + local instance=monitoring-mysql-0 logger.go:42: 08:40:31 | monitoring/4-check-metrics | + local user_pass=api_key:eyJrIjoiUUtLbk1PRkVDWXlaNEtscVFhQlBNazVSVTYyZ0VNZVciLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 08:40:31 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S -d '-30 minute' logger.go:42: 08:40:31 | monitoring/4-check-metrics | + local start=2024-08-15T08:10:31 logger.go:42: 08:40:31 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S logger.go:42: 08:40:31 | monitoring/4-check-metrics | + local end=2024-08-15T08:40:31 logger.go:42: 08:40:31 | monitoring/4-check-metrics | + local endpoint=monitoring-service logger.go:42: 08:40:31 | monitoring/4-check-metrics | ++ cat logger.go:42: 08:40:31 | monitoring/4-check-metrics | +++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z logger.go:42: 08:40:31 | monitoring/4-check-metrics | +++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z logger.go:42: 08:40:31 | monitoring/4-check-metrics | + local 'payload={ logger.go:42: 08:40:31 | monitoring/4-check-metrics | "columns":[ logger.go:42: 08:40:31 | monitoring/4-check-metrics | "load", logger.go:42: 08:40:31 | monitoring/4-check-metrics | "num_queries", logger.go:42: 08:40:31 | monitoring/4-check-metrics | "query_time" logger.go:42: 08:40:31 | monitoring/4-check-metrics | ], logger.go:42: 08:40:31 | monitoring/4-check-metrics | "first_seen": false, logger.go:42: 08:40:31 | monitoring/4-check-metrics | "group_by": "queryid", logger.go:42: 08:40:31 | monitoring/4-check-metrics | "include_only_fields": [], logger.go:42: 08:40:31 | monitoring/4-check-metrics | "keyword": "", logger.go:42: 08:40:31 | monitoring/4-check-metrics | "labels": [ logger.go:42: 08:40:31 | monitoring/4-check-metrics | { logger.go:42: 08:40:31 | monitoring/4-check-metrics | "key": "cluster", logger.go:42: 08:40:31 | monitoring/4-check-metrics | "value": ["monitoring"] logger.go:42: 08:40:31 | monitoring/4-check-metrics | }], logger.go:42: 08:40:31 | monitoring/4-check-metrics | "limit": 10, logger.go:42: 08:40:31 | monitoring/4-check-metrics | "offset": 0, logger.go:42: 08:40:31 | monitoring/4-check-metrics | "order_by": "-load", logger.go:42: 08:40:31 | monitoring/4-check-metrics | "main_metric": "load", logger.go:42: 08:40:31 | monitoring/4-check-metrics | "period_start_from": "2024-08-14T20:40:31+00:00", logger.go:42: 08:40:31 | monitoring/4-check-metrics | "period_start_to": "2024-08-15T08:40:31+00:00" logger.go:42: 08:40:31 | monitoring/4-check-metrics | }' logger.go:42: 08:40:31 | monitoring/4-check-metrics | + jq '.rows[].fingerprint' logger.go:42: 08:40:31 | monitoring/4-check-metrics | ++ sed 's/\n//g' logger.go:42: 08:40:31 | monitoring/4-check-metrics | ++ echo '{' '"columns":[' '"load",' '"num_queries",' '"query_time"' '],' '"first_seen":' false, '"group_by":' '"queryid",' '"include_only_fields":' '[],' '"keyword":' '"",' '"labels":' '[' '{' '"key":' '"cluster",' '"value":' '["monitoring"]' '}],' '"limit":' 10, '"offset":' 0, 
'"order_by":' '"-load",' '"main_metric":' '"load",' '"period_start_from":' '"2024-08-14T20:40:31+00:00",' '"period_start_to":' '"2024-08-15T08:40:31+00:00"' '}' logger.go:42: 08:40:31 | monitoring/4-check-metrics | + run_curl -XPOST -d ''\''{ "columns":[ "load", "num_queries", "query_time" ], "first_seen": false, "group_by": "queryid", "include_only_fields": [], "keyword": "", "labels": [ { "key": "cluster", "value": ["monitoring"] }], "limit": 10, "offset": 0, "order_by": "-load", "main_metric": "load", "period_start_from": "2024-08-14T20:40:31+00:00", "period_start_to": "2024-08-15T08:40:31+00:00" }'\''' https://api_key:eyJrIjoiUUtLbk1PRkVDWXlaNEtscVFhQlBNazVSVTYyZ0VNZVciLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9@monitoring-service/v0/qan/GetReport logger.go:42: 08:40:31 | monitoring/4-check-metrics | + kubectl -n kuttl-test-hopeful-yeti exec mysql-client -- bash -c 'curl -s -k -XPOST -d '\''{ "columns":[ "load", "num_queries", "query_time" ], "first_seen": false, "group_by": "queryid", "include_only_fields": [], "keyword": "", "labels": [ { "key": "cluster", "value": ["monitoring"] }], "limit": 10, "offset": 0, "order_by": "-load", "main_metric": "load", "period_start_from": "2024-08-14T20:40:31+00:00", "period_start_to": "2024-08-15T08:40:31+00:00" }'\'' https://api_key:eyJrIjoiUUtLbk1PRkVDWXlaNEtscVFhQlBNazVSVTYyZ0VNZVciLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9@monitoring-service/v0/qan/GetReport' logger.go:42: 08:40:33 | monitoring/4-check-metrics | "TOTAL" logger.go:42: 08:40:33 | monitoring/4-check-metrics | "REPLACE INTO `sys_operator` . `heartbeat` ( `ts` , `server_id` , FILE , `position` , `relay_master_log_file` , `exec_master_log_pos` ) VALUES (...)" logger.go:42: 08:40:33 | monitoring/4-check-metrics | "SELECT `EVENT_NAME` , `COUNT_STAR` , `SUM_TIMER_WAIT` FROM `performance_schema` . `events_waits_summary_global_by_event_name`" logger.go:42: 08:40:33 | monitoring/4-check-metrics | "SHOW GLOBAL STATUS" logger.go:42: 08:40:33 | monitoring/4-check-metrics | "SHOW GLOBAL VARIABLES LIKE ?" logger.go:42: 08:40:33 | monitoring/4-check-metrics | "SELECT COLUMN_NAME FROM `information_schema` . `columns` WHERE `table_schema` = ? AND TABLE_NAME = ? AND COLUMN_NAME IN (...) LIMIT ?" logger.go:42: 08:40:33 | monitoring/4-check-metrics | "SHOW GLOBAL STATUS LIKE ?" logger.go:42: 08:40:33 | monitoring/4-check-metrics | "SELECT NAME , `subsystem` , TYPE , COMMENT , `count` FROM `information_schema` . `innodb_metrics` WHERE `status` = ?" logger.go:42: 08:40:33 | monitoring/4-check-metrics | "SELECT `conn_status` . `channel_name` AS `channel_name` , `conn_status` . `service_state` AS RELAY_THREAD , `applier_status` . `service_state` AS SQL_THREAD , `LAST_APPLIED_TRANSACTION_END_APPLY_TIMESTAMP` - `LAST_APPLIED_TRANSACTION_ORIGINAL_COMMIT_TIMESTAMP` ? , `LAST_QUEUED_TRANSACTION_START_QUEUE_TIMESTAMP` - `LAST_QUEUED_TRANSACTION_ORIGINAL_COMMIT_TIMESTAMP` ? , `LAST_QUEUED_TRANSACTION_END_QUEUE_TIMESTAMP` - `LAST_QUEUED_TRANSACTION_START_QUEUE_TIMESTAMP` ? , `LAST_APPLIED_TRANSACTION_END_APPLY_TIMESTAMP` - `LAST_APPLIED_TRANSACTION_START_APPLY_TIMESTAMP` ? , IF ( `GTID_SUBTRACT` ( `LAST_QUEUED_TRANSACTION` , `LAST_APPLIED_TRANSACTION` ) = ?, ... , `abs` ( `time_to_sec` ( IF ( `time_to_sec` ( `APPLYING_TRANSACTION_ORIGINAL_COMMIT_TIMESTAMP` ) = ?, ... , `timediff` ( `APPLYING_TRANSACTION_ORIGINAL_COMMIT_TIMESTAMP` , NOW ( ) ) ) ) ) ) `lag_in_seconds` FROM `performance_schema` . `replication_connection_status` AS `conn_status` JOIN `performance_schema` . 
`replication_applier_status_by_worker` AS `applier_status` ON" logger.go:42: 08:40:33 | monitoring/4-check-metrics | "SELECT `t` . `table_schema` , `t` . `table_name` , COLUMN_NAME , AUTO_INCREMENT , `pow` ( ? , CASE `data_type` WHEN ? THEN ? WHEN ? THEN ? WHEN ? THEN ? WHEN ? THEN ? WHEN ? THEN ? END + ( `column_type` LIKE ? ) ) - ? AS `max_int` FROM `information_schema` . `columns` `c` STRAIGHT_JOIN `information_schema` . `tables` `t` ON BINARY `t` . `table_schema` = `c` . `table_schema` AND BINARY `t` . `table_name` = `c` . `table_name` WHERE `c` . `extra` = ? AND `t` . `auto_increment` IS NOT NULL" logger.go:42: 08:40:33 | monitoring/4-check-metrics | "SELECT `substring_index` ( HOST , ?, ... ) AS `slave_hostname` FROM `information_schema` . `processlist` WHERE `command` IN (...)" logger.go:42: 08:40:33 | monitoring/4-check-metrics | ++ get_service_ip monitoring-haproxy logger.go:42: 08:40:33 | monitoring/4-check-metrics | ++ local service=monitoring-haproxy logger.go:42: 08:40:33 | monitoring/4-check-metrics | ++ kubectl get service/monitoring-haproxy -n kuttl-test-hopeful-yeti -o 'jsonpath={.spec.type}' logger.go:42: 08:40:33 | monitoring/4-check-metrics | ++ grep -q NotFound logger.go:42: 08:40:34 | monitoring/4-check-metrics | +++ kubectl get service/monitoring-haproxy -n kuttl-test-hopeful-yeti -o 'jsonpath={.spec.type}' logger.go:42: 08:40:34 | monitoring/4-check-metrics | ++ '[' LoadBalancer = ClusterIP ']' logger.go:42: 08:40:34 | monitoring/4-check-metrics | ++ egrep -q 'hostname|ip' logger.go:42: 08:40:34 | monitoring/4-check-metrics | ++ kubectl get service/monitoring-haproxy -n kuttl-test-hopeful-yeti -o 'jsonpath={.status.loadBalancer.ingress[]}' logger.go:42: 08:40:35 | monitoring/4-check-metrics | ++ kubectl get service/monitoring-haproxy -n kuttl-test-hopeful-yeti -o 'jsonpath={.status.loadBalancer.ingress[].ip}' logger.go:42: 08:40:35 | monitoring/4-check-metrics | ++ kubectl get service/monitoring-haproxy -n kuttl-test-hopeful-yeti -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' logger.go:42: 08:40:35 | monitoring/4-check-metrics | + haproxy_svc=34.70.71.186 logger.go:42: 08:40:35 | monitoring/4-check-metrics | ++ curl -s -o /dev/null -w '%{http_code}' http://34.70.71.186:8404/metrics logger.go:42: 08:40:36 | monitoring/4-check-metrics | + http_code=200 logger.go:42: 08:40:36 | monitoring/4-check-metrics | + [[ 200 != 200 ]] logger.go:42: 08:40:36 | monitoring/4-check-metrics | test step completed 4-check-metrics logger.go:42: 08:40:36 | monitoring/5-check-password-leak | starting test step 5-check-password-leak logger.go:42: 08:40:36 | monitoring/5-check-password-leak | running command: [sh -c set -o errexit set -o xtrace source ../../functions check_passwords_leak] logger.go:42: 08:40:36 | monitoring/5-check-password-leak | + source ../../functions logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ realpath ../../.. 
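
Between them, the 3-rotate-pmm-key and 4-check-metrics steps traced above reduce to a short, reusable sequence: mint a new Grafana API key on the PMM server, patch it into the secret the operator watches, delete the old key, then confirm that metrics and QAN data still flow under the new key. A condensed sketch of that flow, reconstructed from the xtrace output (endpoints, payloads, flags, and object names are copied from the trace; the variable names, the hard-coded admin password, and the use of jq -r are simplifications, not part of the test suite):

    #!/usr/bin/env bash
    set -o errexit

    NS=kuttl-test-hopeful-yeti      # namespace from this run
    # get_service_ip, condensed: assumes a LoadBalancer service exposing an IP
    PMM_IP=$(kubectl -n "$NS" get service/monitoring-service \
        -o 'jsonpath={.status.loadBalancer.ingress[].ip}')

    # 1. Mint a new Admin API key (POST /graph/api/auth/keys, as in the trace);
    #    jq -r strips the quotes that the raw `jq .key` in the trace leaves in.
    #    The admin password is read from the monitoring-0 pod env in the trace.
    NEW_KEY=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
        -d '{"name":"operator-new", "role": "Admin"}' \
        "https://admin:admin@${PMM_IP}/graph/api/auth/keys" | jq -r .key)

    # 2. Hand the key to the operator by patching the secret it watches.
    kubectl -n "$NS" patch secret test-secrets --type merge \
        --patch "{\"stringData\": {\"pmmserverkey\": \"${NEW_KEY}\"}}"

    # 3. Find and delete the old key (GET, then DELETE /graph/api/auth/keys/<id>).
    OLD_ID=$(curl --insecure -s "https://admin:admin@${PMM_IP}/graph/api/auth/keys" \
        | jq '.[] | select(.name == "operator").id')
    curl --insecure -s -X DELETE "https://admin:admin@${PMM_IP}/graph/api/auth/keys/${OLD_ID}"

    # 4. Verify the rotation took: read the key back from the internal secret and
    #    use it for the per-pod metric queries and the QAN call to
    #    /v0/qan/GetReport seen above; finally expect HTTP 200 from the HAProxy
    #    exporter on port 8404.
    API_KEY=$(kubectl -n "$NS" get secret internal-monitoring \
        -o 'jsonpath={.data.pmmserverkey}' | base64 --decode)
    HAPROXY_IP=$(kubectl -n "$NS" get service/monitoring-haproxy \
        -o 'jsonpath={.status.loadBalancer.ingress[].ip}')
    [[ $(curl -s -o /dev/null -w '%{http_code}' "http://${HAPROXY_IP}:8404/metrics") == 200 ]]

Note that the test re-lists the keys and selects the old one by name rather than reusing any id from the create call, so the delete does not depend on state carried over from earlier steps.
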
logger.go:42: 08:40:36 | monitoring/5-check-password-leak | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-721 logger.go:42: 08:40:36 | monitoring/5-check-password-leak | ++++ pwd logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/tests/monitoring logger.go:42: 08:40:36 | monitoring/5-check-password-leak | ++ test_name=monitoring logger.go:42: 08:40:36 | monitoring/5-check-password-leak | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/vars.sh logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-721 logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-721 logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/deploy logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/deploy logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/conf logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/conf logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 08:40:36 | monitoring/5-check-password-leak | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ export GIT_BRANCH=PR-721 logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ GIT_BRANCH=PR-721 logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ export VERSION=PR-721-bf839312 logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ VERSION=PR-721-bf839312 logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-721-bf839312 logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-721-bf839312 logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 08:40:36 | 
monitoring/5-check-password-leak | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ export CERT_MANAGER_VER=1.15.1 logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ CERT_MANAGER_VER=1.15.1 logger.go:42: 08:40:36 | monitoring/5-check-password-leak | ++++ which gdate logger.go:42: 08:40:36 | monitoring/5-check-password-leak | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-721/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 08:40:36 | monitoring/5-check-password-leak | ++++ which date logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ date=/usr/bin/date logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ command -v oc logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ kubectl get nodes logger.go:42: 08:40:36 | monitoring/5-check-password-leak | +++ grep '^minikube' logger.go:42: 08:40:36 | monitoring/5-check-password-leak | + check_passwords_leak logger.go:42: 08:40:36 | monitoring/5-check-password-leak | + local secrets logger.go:42: 08:40:36 | monitoring/5-check-password-leak | + local passwords logger.go:42: 08:40:36 | monitoring/5-check-password-leak | + local pods logger.go:42: 08:40:36 | monitoring/5-check-password-leak | ++ kubectl get secrets -o json logger.go:42: 08:40:36 | monitoring/5-check-password-leak | ++ jq -r '.items[].data | to_entries | .[] | select(.key | (endswith(".crt") or endswith(".key") or endswith(".pub") or endswith(".pem") or endswith(".p12") or test("namespace")) | not) | .value' logger.go:42: 08:40:37 | monitoring/5-check-password-leak | + secrets= logger.go:42: 08:40:37 | monitoring/5-check-password-leak | + passwords=' ' logger.go:42: 08:40:37 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-hopeful-yeti get pods -o name logger.go:42: 08:40:37 | monitoring/5-check-password-leak | ++ awk -F / '{print $2}' logger.go:42: 08:40:37 | monitoring/5-check-password-leak | + pods='monitoring-0 logger.go:42: 08:40:37 | monitoring/5-check-password-leak | monitoring-haproxy-0 logger.go:42: 08:40:37 | monitoring/5-check-password-leak | monitoring-haproxy-1 logger.go:42: 08:40:37 | monitoring/5-check-password-leak | monitoring-haproxy-2 logger.go:42: 08:40:37 | monitoring/5-check-password-leak | monitoring-mysql-0 
logger.go:42: 08:40:37 | monitoring/5-check-password-leak | monitoring-mysql-1 logger.go:42: 08:40:37 | monitoring/5-check-password-leak | monitoring-mysql-2 logger.go:42: 08:40:37 | monitoring/5-check-password-leak | monitoring-orc-0 logger.go:42: 08:40:37 | monitoring/5-check-password-leak | monitoring-orc-1 logger.go:42: 08:40:37 | monitoring/5-check-password-leak | monitoring-orc-2 logger.go:42: 08:40:37 | monitoring/5-check-password-leak | mysql-client' logger.go:42: 08:40:37 | monitoring/5-check-password-leak | + collect_logs kuttl-test-hopeful-yeti logger.go:42: 08:40:37 | monitoring/5-check-password-leak | + local containers logger.go:42: 08:40:37 | monitoring/5-check-password-leak | + local count logger.go:42: 08:40:37 | monitoring/5-check-password-leak | + NS=kuttl-test-hopeful-yeti logger.go:42: 08:40:37 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 08:40:37 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-hopeful-yeti get pod monitoring-0 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 08:40:38 | monitoring/5-check-password-leak | + containers=monitoring logger.go:42: 08:40:38 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:38 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-0 -c monitoring logger.go:42: 08:40:38 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-0-monitoring.txt logger.go:42: 08:40:38 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-0-monitoring.txt logger.go:42: 08:40:38 | monitoring/5-check-password-leak | + echo logger.go:42: 08:40:38 | monitoring/5-check-password-leak | logger.go:42: 08:40:38 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 08:40:38 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-hopeful-yeti get pod monitoring-haproxy-0 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 08:40:39 | monitoring/5-check-password-leak | + containers='haproxy mysql-monit pmm-client' logger.go:42: 08:40:39 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:39 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-haproxy-0 -c haproxy logger.go:42: 08:40:39 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-haproxy.txt logger.go:42: 08:40:39 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-haproxy.txt logger.go:42: 08:40:39 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:39 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-haproxy-0 -c mysql-monit logger.go:42: 08:40:40 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-mysql-monit.txt logger.go:42: 08:40:40 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-mysql-monit.txt logger.go:42: 08:40:40 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:40 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-haproxy-0 -c pmm-client logger.go:42: 08:40:41 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-pmm-client.txt logger.go:42: 08:40:41 | 
monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-pmm-client.txt logger.go:42: 08:40:41 | monitoring/5-check-password-leak | + echo logger.go:42: 08:40:41 | monitoring/5-check-password-leak | logger.go:42: 08:40:41 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 08:40:41 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-hopeful-yeti get pod monitoring-haproxy-1 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 08:40:41 | monitoring/5-check-password-leak | + containers='haproxy mysql-monit pmm-client' logger.go:42: 08:40:41 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:41 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-haproxy-1 -c haproxy logger.go:42: 08:40:42 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-haproxy.txt logger.go:42: 08:40:42 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-haproxy.txt logger.go:42: 08:40:42 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:42 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-haproxy-1 -c mysql-monit logger.go:42: 08:40:42 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-mysql-monit.txt logger.go:42: 08:40:42 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-mysql-monit.txt logger.go:42: 08:40:42 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:42 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-haproxy-1 -c pmm-client logger.go:42: 08:40:43 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-pmm-client.txt logger.go:42: 08:40:43 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-pmm-client.txt logger.go:42: 08:40:43 | monitoring/5-check-password-leak | + echo logger.go:42: 08:40:43 | monitoring/5-check-password-leak | logger.go:42: 08:40:43 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 08:40:43 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-hopeful-yeti get pod monitoring-haproxy-2 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 08:40:44 | monitoring/5-check-password-leak | + containers='haproxy mysql-monit pmm-client' logger.go:42: 08:40:44 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:44 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-haproxy-2 -c haproxy logger.go:42: 08:40:44 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-haproxy.txt logger.go:42: 08:40:44 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-haproxy.txt logger.go:42: 08:40:44 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:44 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-haproxy-2 -c mysql-monit logger.go:42: 08:40:45 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-mysql-monit.txt 
logger.go:42: 08:40:45 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-mysql-monit.txt logger.go:42: 08:40:45 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:45 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-haproxy-2 -c pmm-client logger.go:42: 08:40:46 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-pmm-client.txt logger.go:42: 08:40:46 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-pmm-client.txt logger.go:42: 08:40:46 | monitoring/5-check-password-leak | + echo logger.go:42: 08:40:46 | monitoring/5-check-password-leak | logger.go:42: 08:40:46 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 08:40:46 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-hopeful-yeti get pod monitoring-mysql-0 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 08:40:46 | monitoring/5-check-password-leak | + containers='mysql xtrabackup pt-heartbeat pmm-client' logger.go:42: 08:40:46 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:46 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-mysql-0 -c mysql logger.go:42: 08:40:47 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-mysql.txt logger.go:42: 08:40:47 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-mysql.txt logger.go:42: 08:40:47 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:47 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-mysql-0 -c xtrabackup logger.go:42: 08:40:48 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-xtrabackup.txt logger.go:42: 08:40:48 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-xtrabackup.txt logger.go:42: 08:40:48 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:48 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-mysql-0 -c pt-heartbeat logger.go:42: 08:40:48 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-pt-heartbeat.txt logger.go:42: 08:40:48 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-pt-heartbeat.txt logger.go:42: 08:40:48 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:48 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-mysql-0 -c pmm-client logger.go:42: 08:40:49 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-pmm-client.txt logger.go:42: 08:40:49 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-pmm-client.txt logger.go:42: 08:40:49 | monitoring/5-check-password-leak | + echo logger.go:42: 08:40:49 | monitoring/5-check-password-leak | logger.go:42: 08:40:49 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 08:40:49 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-hopeful-yeti get pod 
monitoring-mysql-1 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 08:40:49 | monitoring/5-check-password-leak | + containers='mysql xtrabackup pt-heartbeat pmm-client' logger.go:42: 08:40:49 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:49 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-mysql-1 -c mysql logger.go:42: 08:40:50 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-mysql.txt logger.go:42: 08:40:50 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-mysql.txt logger.go:42: 08:40:50 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:50 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-mysql-1 -c xtrabackup logger.go:42: 08:40:50 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-xtrabackup.txt logger.go:42: 08:40:50 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-xtrabackup.txt logger.go:42: 08:40:50 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:50 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-mysql-1 -c pt-heartbeat logger.go:42: 08:40:51 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-pt-heartbeat.txt logger.go:42: 08:40:51 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-pt-heartbeat.txt logger.go:42: 08:40:51 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:51 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-mysql-1 -c pmm-client logger.go:42: 08:40:52 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-pmm-client.txt logger.go:42: 08:40:52 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-pmm-client.txt logger.go:42: 08:40:52 | monitoring/5-check-password-leak | + echo logger.go:42: 08:40:52 | monitoring/5-check-password-leak | logger.go:42: 08:40:52 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 08:40:52 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-hopeful-yeti get pod monitoring-mysql-2 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 08:40:52 | monitoring/5-check-password-leak | + containers='mysql xtrabackup pt-heartbeat pmm-client' logger.go:42: 08:40:52 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:52 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-mysql-2 -c mysql logger.go:42: 08:40:53 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-mysql.txt logger.go:42: 08:40:53 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-mysql.txt logger.go:42: 08:40:53 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:53 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-mysql-2 -c xtrabackup logger.go:42: 08:40:53 | monitoring/5-check-password-leak | + echo logs saved in: 
/tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-xtrabackup.txt logger.go:42: 08:40:53 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-xtrabackup.txt logger.go:42: 08:40:53 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:53 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-mysql-2 -c pt-heartbeat logger.go:42: 08:40:54 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-pt-heartbeat.txt logger.go:42: 08:40:54 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-pt-heartbeat.txt logger.go:42: 08:40:54 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:54 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-mysql-2 -c pmm-client logger.go:42: 08:40:55 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-pmm-client.txt logger.go:42: 08:40:55 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-pmm-client.txt logger.go:42: 08:40:55 | monitoring/5-check-password-leak | + echo logger.go:42: 08:40:55 | monitoring/5-check-password-leak | logger.go:42: 08:40:55 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 08:40:55 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-hopeful-yeti get pod monitoring-orc-0 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 08:40:55 | monitoring/5-check-password-leak | + containers='orc mysql-monit' logger.go:42: 08:40:55 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:55 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-orc-0 -c orc logger.go:42: 08:40:56 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-0-orc.txt logger.go:42: 08:40:56 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-0-orc.txt logger.go:42: 08:40:56 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:56 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-orc-0 -c mysql-monit logger.go:42: 08:40:57 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-0-mysql-monit.txt logger.go:42: 08:40:57 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-0-mysql-monit.txt logger.go:42: 08:40:57 | monitoring/5-check-password-leak | + echo logger.go:42: 08:40:57 | monitoring/5-check-password-leak | logger.go:42: 08:40:57 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 08:40:57 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-hopeful-yeti get pod monitoring-orc-1 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 08:40:57 | monitoring/5-check-password-leak | + containers='orc mysql-monit' logger.go:42: 08:40:57 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:57 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-orc-1 -c orc logger.go:42: 08:40:58 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-1-orc.txt 
logger.go:42: 08:40:58 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-1-orc.txt logger.go:42: 08:40:58 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:58 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-orc-1 -c mysql-monit logger.go:42: 08:40:58 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-1-mysql-monit.txt logger.go:42: 08:40:58 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-1-mysql-monit.txt logger.go:42: 08:40:58 | monitoring/5-check-password-leak | + echo logger.go:42: 08:40:58 | monitoring/5-check-password-leak | logger.go:42: 08:40:58 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 08:40:58 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-hopeful-yeti get pod monitoring-orc-2 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 08:40:59 | monitoring/5-check-password-leak | + containers='orc mysql-monit' logger.go:42: 08:40:59 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:40:59 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-orc-2 -c orc logger.go:42: 08:41:00 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-2-orc.txt logger.go:42: 08:41:00 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-2-orc.txt logger.go:42: 08:41:00 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:41:00 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs monitoring-orc-2 -c mysql-monit logger.go:42: 08:41:00 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-2-mysql-monit.txt logger.go:42: 08:41:00 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-2-mysql-monit.txt logger.go:42: 08:41:00 | monitoring/5-check-password-leak | + echo logger.go:42: 08:41:00 | monitoring/5-check-password-leak | logger.go:42: 08:41:00 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 08:41:00 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-hopeful-yeti get pod mysql-client -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 08:41:01 | monitoring/5-check-password-leak | + containers=mysql-client logger.go:42: 08:41:01 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:41:01 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-hopeful-yeti logs mysql-client -c mysql-client logger.go:42: 08:41:01 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-mysql-client-mysql-client.txt logger.go:42: 08:41:01 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-mysql-client-mysql-client.txt logger.go:42: 08:41:01 | monitoring/5-check-password-leak | + echo logger.go:42: 08:41:01 | monitoring/5-check-password-leak | logger.go:42: 08:41:01 | monitoring/5-check-password-leak | + '[' -n ps-operator ']' logger.go:42: 08:41:01 | monitoring/5-check-password-leak | ++ kubectl -n ps-operator get pods -o name logger.go:42: 08:41:01 | monitoring/5-check-password-leak | ++ awk -F / '{print $2}' logger.go:42: 08:41:02 | monitoring/5-check-password-leak | + 
pods=percona-server-mysql-operator-65db5964c8-qkzqx logger.go:42: 08:41:02 | monitoring/5-check-password-leak | + collect_logs ps-operator logger.go:42: 08:41:02 | monitoring/5-check-password-leak | + local containers logger.go:42: 08:41:02 | monitoring/5-check-password-leak | + local count logger.go:42: 08:41:02 | monitoring/5-check-password-leak | + NS=ps-operator logger.go:42: 08:41:02 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 08:41:02 | monitoring/5-check-password-leak | ++ kubectl -n ps-operator get pod percona-server-mysql-operator-65db5964c8-qkzqx -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 08:41:02 | monitoring/5-check-password-leak | + containers=manager logger.go:42: 08:41:02 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 08:41:02 | monitoring/5-check-password-leak | + kubectl -n ps-operator logs percona-server-mysql-operator-65db5964c8-qkzqx -c manager logger.go:42: 08:41:03 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-percona-server-mysql-operator-65db5964c8-qkzqx-manager.txt logger.go:42: 08:41:03 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-percona-server-mysql-operator-65db5964c8-qkzqx-manager.txt logger.go:42: 08:41:03 | monitoring/5-check-password-leak | + echo logger.go:42: 08:41:03 | monitoring/5-check-password-leak | logger.go:42: 08:41:03 | monitoring/5-check-password-leak | test step completed 5-check-password-leak logger.go:42: 08:41:03 | monitoring/98-drop-finalizer | starting test step 98-drop-finalizer logger.go:42: 08:41:04 | monitoring/98-drop-finalizer | PerconaServerMySQL:kuttl-test-hopeful-yeti/monitoring updated logger.go:42: 08:41:04 | monitoring/98-drop-finalizer | test step completed 98-drop-finalizer logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | starting test step 99-remove-cluster-gracefully logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_operator] logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | + source ../../functions logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ realpath ../../.. 
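
The password-leak sweep that completed above is essentially a two-phase check: decode every secret value in scope that is not a certificate or key file, then dump the logs of every container in both the test namespace and the operator namespace and make sure none of the decoded values appear in them. In this run the decoded list came back empty (secrets=, passwords=' '), so only the collection phase is visible. A minimal sketch of the loop, following the trace (the leak grep at the end is an assumption based on the function's purpose; it is not exercised in this excerpt):

    # mirrors collect_logs <namespace> from e2e-tests/functions, as traced above;
    # $passwords holds the base64-decoded secret values, filtered to exclude
    # *.crt/*.key/*.pub/*.pem/*.p12 and namespace entries (empty in this run)
    collect_logs() {
        local ns=$1
        for p in $(kubectl -n "$ns" get pods -o name | awk -F/ '{print $2}'); do
            local containers
            containers=$(kubectl -n "$ns" get pod "$p" \
                -o 'jsonpath={.spec.containers[*].name}')
            for c in $containers; do
                local out="/tmp/kuttl/ps/monitoring/logs_output-${p}-${c}.txt"
                kubectl -n "$ns" logs "$p" -c "$c" > "$out"
                echo "logs saved in: $out"
                for pass in $passwords; do      # assumed check, empty this run
                    grep -F -- "$pass" "$out" && echo "leak in $out" && exit 1
                done
            done
            echo
        done
    }

    collect_logs kuttl-test-hopeful-yeti
    collect_logs ps-operator        # only when an operator namespace is set
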
logger.go:42: 08:41:03 | monitoring/98-drop-finalizer | starting test step 98-drop-finalizer
logger.go:42: 08:41:04 | monitoring/98-drop-finalizer | PerconaServerMySQL:kuttl-test-hopeful-yeti/monitoring updated
logger.go:42: 08:41:04 | monitoring/98-drop-finalizer | test step completed 98-drop-finalizer
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | starting test step 99-remove-cluster-gracefully
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_operator]
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | + source ../../functions
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ realpath ../../..
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-721
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | ++++ pwd
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/tests/monitoring
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | ++ test_name=monitoring
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/vars.sh
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-721
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-721
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/deploy
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/deploy
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/conf
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-721/e2e-tests/conf
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ export GIT_BRANCH=PR-721
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ GIT_BRANCH=PR-721
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ export VERSION=PR-721-bf839312
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ VERSION=PR-721-bf839312
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-721-bf839312
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-721-bf839312
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++
IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ export CERT_MANAGER_VER=1.15.1 logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ CERT_MANAGER_VER=1.15.1 logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | ++++ which gdate logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-721/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | ++++ which date logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ date=/usr/bin/date logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ command -v oc logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ kubectl get nodes logger.go:42: 08:41:04 | monitoring/99-remove-cluster-gracefully | +++ grep '^minikube' logger.go:42: 08:41:05 | monitoring/99-remove-cluster-gracefully | + destroy_operator logger.go:42: 08:41:05 | monitoring/99-remove-cluster-gracefully | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0 logger.go:42: 08:41:05 | monitoring/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 08:41:05 | monitoring/99-remove-cluster-gracefully | deployment.apps "percona-server-mysql-operator" force deleted logger.go:42: 08:41:05 | monitoring/99-remove-cluster-gracefully | + [[ -n ps-operator ]] logger.go:42: 08:41:05 | monitoring/99-remove-cluster-gracefully | + kubectl delete namespace ps-operator --force --grace-period=0 logger.go:42: 08:41:05 | monitoring/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely. logger.go:42: 08:41:06 | monitoring/99-remove-cluster-gracefully | namespace "ps-operator" force deleted logger.go:42: 08:41:11 | monitoring/99-remove-cluster-gracefully | test step completed 99-remove-cluster-gracefully logger.go:42: 08:41:12 | monitoring | monitoring events from ns kuttl-test-hopeful-yeti: logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:27:56 +0000 UTC Normal Pod mysql-client Scheduled Successfully assigned kuttl-test-hopeful-yeti/mysql-client to gke-jen-ps-721-bf839312--default-pool-05a92fb0-lq3l default-scheduler logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:27:57 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Container image "percona/percona-server:8.0.33" already present on machine kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:27:57 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container mysql-client kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:27:57 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:28:10 +0000 UTC Normal StatefulSet.apps monitoring SuccessfulCreate create Claim pmmdata-monitoring-0 Pod monitoring-0 in StatefulSet monitoring success statefulset-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:28:10 +0000 UTC Normal PersistentVolumeClaim pmmdata-monitoring-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:28:11 +0000 UTC Normal Service monitoring-service EnsuringLoadBalancer Ensuring load balancer service-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:28:11 +0000 UTC Normal StatefulSet.apps monitoring SuccessfulCreate create Pod monitoring-0 in StatefulSet monitoring successful statefulset-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:28:11 +0000 UTC Normal PersistentVolumeClaim pmmdata-monitoring-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-hopeful-yeti/pmmdata-monitoring-0" pd.csi.storage.gke.io_gke-ee252121e9a74123bbaf-52ba-d845-vm_09e6208a-a7a0-4f58-9768-cfdd10576ed7 logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:28:11 +0000 UTC Normal PersistentVolumeClaim pmmdata-monitoring-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:28:14 +0000 UTC Normal PersistentVolumeClaim pmmdata-monitoring-0 ProvisioningSucceeded Successfully provisioned volume pvc-aefbe87c-2030-4bcd-b2c9-0017adcafe66 pd.csi.storage.gke.io_gke-ee252121e9a74123bbaf-52ba-d845-vm_09e6208a-a7a0-4f58-9768-cfdd10576ed7 logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:28:15 +0000 UTC Normal Pod monitoring-0 Scheduled Successfully assigned kuttl-test-hopeful-yeti/monitoring-0 to gke-jen-ps-721-bf839312--default-pool-05a92fb0-lq3l default-scheduler logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:28:19 +0000 UTC Normal Pod monitoring-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-aefbe87c-2030-4bcd-b2c9-0017adcafe66" attachdetach-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:28:24 +0000 UTC Normal Pod monitoring-0.spec.containers{monitoring} Pulling Pulling image "perconalab/pmm-server:dev-latest" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:28:48 +0000 UTC Normal Service monitoring-service EnsuredLoadBalancer Ensured load balancer service-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:10 +0000 UTC Normal Pod monitoring-0.spec.containers{monitoring} Pulled Successfully pulled image "perconalab/pmm-server:dev-latest" in 45.895s (45.895s including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:10 +0000 UTC Normal Pod monitoring-0.spec.containers{monitoring} Created Created container monitoring kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:10 +0000 UTC Normal Pod monitoring-0.spec.containers{monitoring} Started Started container monitoring kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:54 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:54 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-hopeful-yeti/datadir-monitoring-mysql-0" pd.csi.storage.gke.io_gke-ee252121e9a74123bbaf-52ba-d845-vm_09e6208a-a7a0-4f58-9768-cfdd10576ed7 logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:54 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:54 +0000 UTC Normal Service monitoring-haproxy EnsuringLoadBalancer Ensuring load balancer service-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:54 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Claim datadir-monitoring-mysql-0 Pod monitoring-mysql-0 in StatefulSet monitoring-mysql success statefulset-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:54 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Pod monitoring-mysql-0 in StatefulSet monitoring-mysql successful statefulset-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:55 +0000 UTC Normal Pod monitoring-orc-0 Scheduled Successfully assigned kuttl-test-hopeful-yeti/monitoring-orc-0 to gke-jen-ps-721-bf839312--default-pool-05a92fb0-d8c2 default-scheduler logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:55 +0000 UTC Normal StatefulSet.apps monitoring-orc SuccessfulCreate create Pod monitoring-orc-0 in StatefulSet monitoring-orc successful statefulset-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:56 +0000 UTC Warning Pod monitoring-orc-0 FailedMount MountVolume.SetUp failed for volume "tls" : failed to sync secret cache: timed out waiting for the condition kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:57 +0000 UTC Normal Pod monitoring-orc-0.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-721-bf839312" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:57 +0000 UTC Normal Pod monitoring-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-721-bf839312" in 126ms (126ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:57 +0000 UTC Normal Pod monitoring-orc-0.spec.initContainers{orc-init} Created Created container orc-init kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:57 +0000 UTC Normal Pod monitoring-orc-0.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:58 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-74eaf5c2-4b9f-4834-a15a-e15758327afc pd.csi.storage.gke.io_gke-ee252121e9a74123bbaf-52ba-d845-vm_09e6208a-a7a0-4f58-9768-cfdd10576ed7 logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:58 +0000 UTC Normal Pod monitoring-mysql-0 Scheduled Successfully assigned kuttl-test-hopeful-yeti/monitoring-mysql-0 to gke-jen-ps-721-bf839312--default-pool-05a92fb0-9mnw default-scheduler logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:59 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:59 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 87ms (87ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:59 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orc} Created Created container orc kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:59 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orc} Started Started container orc kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:59 
+0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:59 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 85ms (85ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:29:59 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:00 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:06 +0000 UTC Normal Pod monitoring-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-74eaf5c2-4b9f-4834-a15a-e15758327afc" attachdetach-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:07 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-721-bf839312" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:08 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-721-bf839312" in 111ms (111ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:08 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:08 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:09 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:10 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 90ms (90ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:10 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:10 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:10 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:10 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 85ms (85ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:10 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:10 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:10 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling 
image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:10 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 84ms (84ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:10 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:10 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:10 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:20 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 9.452s (9.452s including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:20 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:20 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:22 +0000 UTC Normal Service monitoring-haproxy EnsuredLoadBalancer Ensured load balancer service-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:32 +0000 UTC Normal Pod monitoring-orc-1 Scheduled Successfully assigned kuttl-test-hopeful-yeti/monitoring-orc-1 to gke-jen-ps-721-bf839312--default-pool-05a92fb0-lq3l default-scheduler logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:32 +0000 UTC Normal Pod monitoring-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-721-bf839312" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:32 +0000 UTC Normal Pod monitoring-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-721-bf839312" in 113ms (113ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:32 +0000 UTC Normal Pod monitoring-orc-1.spec.initContainers{orc-init} Created Created container orc-init kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:32 +0000 UTC Normal Pod monitoring-orc-1.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:32 +0000 UTC Normal StatefulSet.apps monitoring-orc SuccessfulCreate create Pod monitoring-orc-1 in StatefulSet monitoring-orc successful statefulset-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:34 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:34 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 101ms (101ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:34 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orc} Created Created container orc kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 
08:30:34 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orc} Started Started container orc kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:34 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:34 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 117ms (117ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:34 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:34 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:42 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:42 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Claim datadir-monitoring-mysql-1 Pod monitoring-mysql-1 in StatefulSet monitoring-mysql success statefulset-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:42 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Pod monitoring-mysql-1 in StatefulSet monitoring-mysql successful statefulset-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:43 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:43 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-hopeful-yeti/datadir-monitoring-mysql-1" pd.csi.storage.gke.io_gke-ee252121e9a74123bbaf-52ba-d845-vm_09e6208a-a7a0-4f58-9768-cfdd10576ed7 logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:45 +0000 UTC Normal Pod monitoring-haproxy-0 Scheduled Successfully assigned kuttl-test-hopeful-yeti/monitoring-haproxy-0 to gke-jen-ps-721-bf839312--default-pool-05a92fb0-lq3l default-scheduler logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:45 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-721-bf839312" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:45 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulCreate create Pod monitoring-haproxy-0 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:46 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-721-bf839312" in 115ms (115ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:46 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:46 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:47 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-c0105f08-e704-4769-aa27-e91e31f7fa6a pd.csi.storage.gke.io_gke-ee252121e9a74123bbaf-52ba-d845-vm_09e6208a-a7a0-4f58-9768-cfdd10576ed7 logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:47 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:47 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 106ms (106ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:47 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:47 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:47 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:47 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 94ms (94ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:47 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:47 +0000 UTC Normal Pod 
monitoring-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:47 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:48 +0000 UTC Normal Pod monitoring-mysql-1 Scheduled Successfully assigned kuttl-test-hopeful-yeti/monitoring-mysql-1 to gke-jen-ps-721-bf839312--default-pool-05a92fb0-d8c2 default-scheduler logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:55 +0000 UTC Normal Pod monitoring-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-c0105f08-e704-4769-aa27-e91e31f7fa6a" attachdetach-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:56 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 8.248s (8.248s including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:56 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:56 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:56 +0000 UTC Normal Pod monitoring-haproxy-1 Scheduled Successfully assigned kuttl-test-hopeful-yeti/monitoring-haproxy-1 to gke-jen-ps-721-bf839312--default-pool-05a92fb0-9mnw default-scheduler logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:56 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulCreate create Pod monitoring-haproxy-1 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:56 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-721-bf839312" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:57 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-721-bf839312" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:57 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-721-bf839312" in 107ms (107ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:57 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:57 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:57 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-721-bf839312" in 98ms (98ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:57 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:57 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 08:41:12 | monitoring | 
2024-08-15 08:30:58 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:58 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 94ms (94ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:58 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 78ms (78ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 97ms (98ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 86ms (86ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image 
"perconalab/percona-server-mysql-operator:main-backup" in 100ms (100ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 97ms (97ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:30:59 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:00 +0000 UTC Normal Pod monitoring-haproxy-2 Scheduled Successfully assigned kuttl-test-hopeful-yeti/monitoring-haproxy-2 to gke-jen-ps-721-bf839312--default-pool-05a92fb0-d8c2 default-scheduler logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:00 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-721-bf839312" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:00 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-721-bf839312" in 165ms (165ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:00 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulCreate create Pod monitoring-haproxy-2 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:01 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:01 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:04 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:04 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 94ms (94ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:04 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:05 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Started 
Started container haproxy kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:05 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:05 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 107ms (107ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:05 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:05 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:05 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:07 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 2.412s (2.412s including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:07 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:07 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:07 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 8.318s (8.318s including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:07 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:07 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:07 +0000 UTC Normal Pod monitoring-orc-2 Scheduled Successfully assigned kuttl-test-hopeful-yeti/monitoring-orc-2 to gke-jen-ps-721-bf839312--default-pool-05a92fb0-9mnw default-scheduler logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:07 +0000 UTC Normal StatefulSet.apps monitoring-orc SuccessfulCreate create Pod monitoring-orc-2 in StatefulSet monitoring-orc successful statefulset-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:08 +0000 UTC Normal Pod monitoring-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-721-bf839312" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:08 +0000 UTC Normal Pod monitoring-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-721-bf839312" in 108ms (108ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:08 +0000 UTC Normal Pod monitoring-orc-2.spec.initContainers{orc-init} Created Created container orc-init kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:08 +0000 UTC Normal Pod monitoring-orc-2.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:10 
+0000 UTC Normal Pod monitoring-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:10 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 109ms (109ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:10 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orc} Created Created container orc kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:10 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orc} Started Started container orc kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:10 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:10 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 91ms (91ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:10 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:10 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:17 +0000 UTC Warning Pod monitoring-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:17 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:22 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 89ms (89ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:56 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:56 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Claim datadir-monitoring-mysql-2 Pod monitoring-mysql-2 in StatefulSet monitoring-mysql success statefulset-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:56 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Pod monitoring-mysql-2 in StatefulSet monitoring-mysql successful statefulset-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:57 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-hopeful-yeti/datadir-monitoring-mysql-2" pd.csi.storage.gke.io_gke-ee252121e9a74123bbaf-52ba-d845-vm_09e6208a-a7a0-4f58-9768-cfdd10576ed7 logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:31:57 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. 
If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:00 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-c58bc946-312b-43a0-af5b-66b87362bb72 pd.csi.storage.gke.io_gke-ee252121e9a74123bbaf-52ba-d845-vm_09e6208a-a7a0-4f58-9768-cfdd10576ed7 logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:01 +0000 UTC Normal Pod monitoring-mysql-2 Scheduled Successfully assigned kuttl-test-hopeful-yeti/monitoring-mysql-2 to gke-jen-ps-721-bf839312--default-pool-05a92fb0-lq3l default-scheduler logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:08 +0000 UTC Normal Pod monitoring-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-c58bc946-312b-43a0-af5b-66b87362bb72" attachdetach-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:10 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-721-bf839312" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:10 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-721-bf839312" in 123ms (123ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:10 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:10 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:11 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:11 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 80ms (80ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:11 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:11 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:11 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:11 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 116ms (116ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:11 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:12 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:12 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" 
kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:12 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 83ms (83ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:12 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:12 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:12 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:12 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 99ms (99ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:12 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:12 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:31 +0000 UTC Warning Pod monitoring-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:31 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:32:34 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 89ms (89ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:26 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:26 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:26 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:26 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulDelete delete Pod monitoring-haproxy-2 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:27 +0000 UTC Normal Pod monitoring-haproxy-2 Scheduled Successfully assigned kuttl-test-hopeful-yeti/monitoring-haproxy-2 to gke-jen-ps-721-bf839312--default-pool-05a92fb0-d8c2 default-scheduler logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:28 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-721-bf839312" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:28 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-721-bf839312" in 134ms (134ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:28 +0000 UTC Normal 
Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:28 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:30 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:30 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 83ms (83ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:30 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:30 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:30 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:30 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 82ms (82ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:30 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:30 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:30 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:30 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 104ms (104ms including waiting) kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:30 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:30 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:31 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:31 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:31 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:31 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulDelete delete Pod monitoring-haproxy-1 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:32 +0000 UTC Normal Pod monitoring-haproxy-1 Scheduled 
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:33 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-721-bf839312" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:33 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-721-bf839312" in 111ms (111ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:33 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:33 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:35 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:35 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 98ms (98ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:35 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Created Created container haproxy kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:35 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:35 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:35 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 124ms (124ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:36 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:36 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:36 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:36 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Created Created container mysql-monit kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:36 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:36 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:36 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 132ms (132ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:36 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Created Created container pmm-client kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:36 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Started Started container pmm-client kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:36 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulDelete delete Pod monitoring-haproxy-0 in StatefulSet monitoring-haproxy successful statefulset-controller
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:36 +0000 UTC Normal Pod monitoring-mysql-1 Scheduled Successfully assigned kuttl-test-hopeful-yeti/monitoring-mysql-1 to gke-jen-ps-721-bf839312--default-pool-05a92fb0-d8c2 default-scheduler
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:44 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-721-bf839312" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:44 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-721-bf839312" in 102ms (102ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:44 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Created Created container mysql-init kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:44 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:46 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:46 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 86ms (86ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:46 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Created Created container mysql kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:46 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:46 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:46 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 95ms (95ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:47 +0000 UTC Normal Pod monitoring-haproxy-0 Scheduled Successfully assigned kuttl-test-hopeful-yeti/monitoring-haproxy-0 to gke-jen-ps-721-bf839312--default-pool-05a92fb0-lq3l default-scheduler
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:47 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-721-bf839312" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:47 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Created Created container xtrabackup kubelet
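
[editor's note] Each database pod follows the same pattern: a mysql-init init container from the operator image (PR-721-bf839312) prepares the pod, then the mysql container starts together with the xtrabackup, pt-heartbeat and pmm-client sidecars. The pod composition can be confirmed with a jsonpath query (illustrative):

$ kubectl -n kuttl-test-hopeful-yeti get pod monitoring-mysql-1 -o jsonpath='{.spec.initContainers[*].name} {.spec.containers[*].name}'
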
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:47 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:47 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:47 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 92ms (92ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:47 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:47 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:47 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:47 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 105ms (105ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:47 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Created Created container pmm-client kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:47 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Started Started container pmm-client kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:48 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-721-bf839312" in 122ms (122ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:48 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:48 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:49 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:50 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 95ms (95ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:50 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Created Created container haproxy kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:50 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:50 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:50 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 102ms (102ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:50 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Created Created container mysql-monit kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:50 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:50 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:50 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 101ms (101ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:50 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Created Created container pmm-client kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:33:50 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Started Started container pmm-client kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:05 +0000 UTC Warning Pod monitoring-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:05 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:09 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 94ms (94ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:45 +0000 UTC Normal Pod monitoring-mysql-2 Scheduled Successfully assigned kuttl-test-hopeful-yeti/monitoring-mysql-2 to gke-jen-ps-721-bf839312--default-pool-05a92fb0-lq3l default-scheduler
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:53 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-721-bf839312" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:53 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-721-bf839312" in 148ms (148ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:53 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Created Created container mysql-init kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:53 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:55 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:55 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 101ms (101ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:55 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Created Created container mysql kubelet
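
[editor's note] Every pull in this run completes in roughly 80-150ms, which suggests the image layers are already cached on the GKE nodes; if the pods use imagePullPolicy: Always, the kubelet still re-resolves the tag and emits a Pulled event even though nothing is transferred. The policy in effect can be checked with a jsonpath query (illustrative):

$ kubectl -n kuttl-test-hopeful-yeti get pod monitoring-mysql-2 -o jsonpath='{range .spec.containers[*]}{.name}{"\t"}{.imagePullPolicy}{"\n"}{end}'
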
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:55 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:55 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:55 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 97ms (97ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:55 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Created Created container xtrabackup kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:55 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:55 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:55 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 95ms (95ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:55 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:55 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:55 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:55 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 90ms (90ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:56 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Created Created container pmm-client kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:34:56 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Started Started container pmm-client kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:35:14 +0000 UTC Warning Pod monitoring-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:35:14 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:35:18 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 103ms (103ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:35:54 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:35:54 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:35:54 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:35:54 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:35:57 +0000 UTC Warning Pod monitoring-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2024/08/15 08:35:57 readiness check failed: connect to db: ping DB: dial tcp 10.71.81.23:33062: connect: connection refused kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:02 +0000 UTC Warning Pod monitoring-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2024/08/15 08:36:02 readiness check failed: connect to db: ping DB: dial tcp 10.71.81.23:33062: connect: connection refused kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:15 +0000 UTC Normal Pod monitoring-mysql-0 Scheduled Successfully assigned kuttl-test-hopeful-yeti/monitoring-mysql-0 to gke-jen-ps-721-bf839312--default-pool-05a92fb0-9mnw default-scheduler
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:30 +0000 UTC Normal Pod monitoring-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-74eaf5c2-4b9f-4834-a15a-e15758327afc" attachdetach-controller
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:31 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-721-bf839312" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:31 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-721-bf839312" in 101ms (102ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:31 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Created Created container mysql-init kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:31 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:33 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:33 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 100ms (100ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:33 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Created Created container mysql kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:33 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:33 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:33 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 98ms (98ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:33 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Created Created container xtrabackup kubelet
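
[editor's note] The two Readiness probe failures against 10.71.81.23:33062 are emitted while monitoring-mysql-0 shuts down: the readiness check pings the database (33062 is MySQL's default admin port), so "connection refused" here means mysqld has already exited, not that anything is broken. The readiness of a specific container can be read directly (illustrative):

$ kubectl -n kuttl-test-hopeful-yeti get pod monitoring-mysql-0 -o jsonpath='{.status.containerStatuses[?(@.name=="mysql")].ready}'
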
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:33 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:33 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:33 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 134ms (134ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:33 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:33 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:33 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:33 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 98ms (98ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:33 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Created Created container pmm-client kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:34 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Started Started container pmm-client kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:52 +0000 UTC Warning Pod monitoring-mysql-0.spec.containers{mysql} Unhealthy Startup probe failed: kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:52 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:36:55 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 78ms (78ms including waiting) kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:41:04 +0000 UTC Normal Service monitoring-haproxy DeletingLoadBalancer Deleting load balancer service-controller
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:41:05 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:41:05 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:41:05 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:41:05 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:41:05 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:41:05 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
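
[editor's note] DeletingLoadBalancer at 08:41:04 is the cloud service-controller tearing down the external load balancer that fronted the monitoring-haproxy Service; the container shutdowns that follow are the cleanup cascade at the end of the test. The teardown can be observed live with (illustrative):

$ kubectl -n kuttl-test-hopeful-yeti get service monitoring-haproxy --watch
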
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:41:05 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:41:05 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:41:05 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:41:05 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:41:05 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:41:05 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orc} Killing Stopping container orc kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:41:05 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:41:05 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orc} Killing Stopping container orc kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:41:05 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:41:05 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orc} Killing Stopping container orc kubelet
logger.go:42: 08:41:12 | monitoring | 2024-08-15 08:41:05 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 08:41:12 | monitoring | Deleting namespace: kuttl-test-hopeful-yeti
=== NAME kuttl
    harness.go:407: run tests finished
    harness.go:515: cleaning up
    harness.go:572: removing temp folder: ""
--- PASS: kuttl (846.15s)
    --- PASS: kuttl/harness (0.00s)
        --- PASS: kuttl/harness/monitoring (845.63s)
PASS
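
[editor's note] The suite passes: the monitoring test alone accounts for 845.63s of the 846.15s total, almost all of it spent waiting on the rolling restarts above. For local debugging it can be useful to re-run just this test; a minimal sketch, assuming the kuttl kubectl plugin is installed and the command is run from the repository root (the exact flags this CI job uses are not shown here):

$ kubectl kuttl test --timeout 180 --test monitoring ./e2e-tests/tests
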