=== RUN   kuttl
    harness.go:464: starting setup
    harness.go:255: running tests using configured kubeconfig.
    harness.go:278: Successful connection to cluster at: https://34.45.184.99
    harness.go:363: running tests
    harness.go:75: going to run test suite with timeout of 180 seconds for each step
    harness.go:375: testsuite: e2e-tests/tests has 33 tests
=== RUN   kuttl/harness
=== RUN   kuttl/harness/monitoring
=== PAUSE kuttl/harness/monitoring
=== CONT  kuttl/harness/monitoring
    logger.go:42: 10:31:50 | monitoring | Creating namespace: kuttl-test-precious-dragon
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | starting test step 0-deploy-operator
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | running command: [sh -c set -o errexit
        set -o xtrace
        
        source ../../functions
        init_temp_dir # do this only in the first TestStep
        
        deploy_operator
        deploy_non_tls_cluster_secrets
        deploy_tls_cluster_secrets
        deploy_client]
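The step script above delegates to helpers sourced from e2e-tests/functions. Condensed from the trace that follows, the core of deploy_operator is three applies; a sketch (paths come from the vars.sh variables exported below), with --server-side --force-conflicts taking field ownership of the CRDs from any previous manager:

        kubectl -n "${NAMESPACE}" apply --server-side --force-conflicts -f "${DEPLOY_DIR}/crd.yaml"
        kubectl -n "${NAMESPACE}" apply -f "${DEPLOY_DIR}/rbac.yaml"
        yq eval "$(printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' "${IMAGE}")" \
            "${DEPLOY_DIR}/operator.yaml" | kubectl -n "${NAMESPACE}" apply -f -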
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | + source ../../functions
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ realpath ../../..
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-428
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | ++++ pwd
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/tests/monitoring
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | ++ test_name=monitoring
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/vars.sh
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-428
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-428
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/deploy
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/deploy
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/conf
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/conf
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ export GIT_BRANCH=PR-428
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ GIT_BRANCH=PR-428
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ export VERSION=PR-428-6555938f
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ VERSION=PR-428-6555938f
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-428-6555938f
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-428-6555938f
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ export PMM_SERVER_VERSION=9.9.9
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ PMM_SERVER_VERSION=9.9.9
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ export CERT_MANAGER_VER=1.15.1
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ CERT_MANAGER_VER=1.15.1
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | ++++ which gdate
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-428/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | ++++ which date
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ date=/usr/bin/date
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ command -v oc
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ kubectl get nodes
    logger.go:42: 10:31:50 | monitoring/0-deploy-operator | +++ grep '^minikube'
    logger.go:42: 10:31:51 | monitoring/0-deploy-operator | + init_temp_dir
    logger.go:42: 10:31:51 | monitoring/0-deploy-operator | + rm -rf /tmp/kuttl/ps/monitoring
    logger.go:42: 10:31:51 | monitoring/0-deploy-operator | + mkdir -p /tmp/kuttl/ps/monitoring
    logger.go:42: 10:31:51 | monitoring/0-deploy-operator | + deploy_operator
    logger.go:42: 10:31:51 | monitoring/0-deploy-operator | + destroy_operator
    logger.go:42: 10:31:51 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-precious-dragon delete deployment percona-server-mysql-operator --force --grace-period=0
    logger.go:42: 10:31:51 | monitoring/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
    logger.go:42: 10:31:51 | monitoring/0-deploy-operator | Error from server (NotFound): deployments.apps "percona-server-mysql-operator" not found
    logger.go:42: 10:31:51 | monitoring/0-deploy-operator | + true
    logger.go:42: 10:31:51 | monitoring/0-deploy-operator | + [[ -n '' ]]
    logger.go:42: 10:31:51 | monitoring/0-deploy-operator | + [[ -n '' ]]
    logger.go:42: 10:31:51 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-precious-dragon apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-428/deploy/crd.yaml
    logger.go:42: 10:31:52 | monitoring/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
    logger.go:42: 10:31:52 | monitoring/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
    logger.go:42: 10:31:54 | monitoring/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
    logger.go:42: 10:31:54 | monitoring/0-deploy-operator | + '[' -n '' ']'
    logger.go:42: 10:31:54 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-precious-dragon apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-428/deploy/rbac.yaml
    logger.go:42: 10:31:54 | monitoring/0-deploy-operator | serviceaccount/percona-server-mysql-operator created
    logger.go:42: 10:31:55 | monitoring/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
    logger.go:42: 10:31:55 | monitoring/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator created
    logger.go:42: 10:31:55 | monitoring/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator created
    logger.go:42: 10:31:56 | monitoring/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
    logger.go:42: 10:31:56 | monitoring/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"'
    logger.go:42: 10:31:56 | monitoring/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-428-6555938f
    logger.go:42: 10:31:56 | monitoring/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"'
    logger.go:42: 10:31:56 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-precious-dragon apply -f -
    logger.go:42: 10:31:56 | monitoring/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-428-6555938f"' /mnt/jenkins/workspace/cloud-ps-operator_PR-428/deploy/operator.yaml
    logger.go:42: 10:31:57 | monitoring/0-deploy-operator | configmap/percona-server-mysql-operator-config created
    logger.go:42: 10:31:57 | monitoring/0-deploy-operator | deployment.apps/percona-server-mysql-operator created
    logger.go:42: 10:31:57 | monitoring/0-deploy-operator | + deploy_non_tls_cluster_secrets
    logger.go:42: 10:31:57 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-precious-dragon apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/conf/secrets.yaml
    logger.go:42: 10:31:58 | monitoring/0-deploy-operator | secret/test-secrets created
    logger.go:42: 10:31:58 | monitoring/0-deploy-operator | + deploy_tls_cluster_secrets
    logger.go:42: 10:31:58 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-precious-dragon apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/conf/ssl-secret.yaml
    logger.go:42: 10:31:59 | monitoring/0-deploy-operator | secret/test-ssl created
    logger.go:42: 10:31:59 | monitoring/0-deploy-operator | + deploy_client
    logger.go:42: 10:31:59 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-precious-dragon apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/conf/client.yaml
    logger.go:42: 10:32:00 | monitoring/0-deploy-operator | pod/mysql-client created
    logger.go:42: 10:32:00 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 10:32:00 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 10:32:00 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 10:32:02 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 10:32:02 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 10:32:02 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 10:32:03 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 10:32:03 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 10:32:04 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 10:32:05 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 10:32:05 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 10:32:05 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 10:32:07 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 10:32:07 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 10:32:07 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 10:32:08 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 10:32:09 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 10:32:09 | monitoring/0-deploy-operator | INFO   Found 1 resource(s).
    logger.go:42: 10:32:09 | monitoring/0-deploy-operator | NAME                            NAMESPACE                    COL0
    logger.go:42: 10:32:09 | monitoring/0-deploy-operator | percona-server-mysql-operator   kuttl-test-precious-dragon   1
    logger.go:42: 10:32:09 | monitoring/0-deploy-operator | ASSERT PASS
    logger.go:42: 10:32:09 | monitoring/0-deploy-operator | test step completed 0-deploy-operator
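The assert loop above is kubectl-assert (a krew plugin, per the .krew/bin entry in PATH), re-run by kuttl until the operator deployment reports one ready replica. A minimal plain-kubectl equivalent, assuming kubectl 1.23+ for jsonpath waits:

        kubectl -n "${OPERATOR_NS:-$NAMESPACE}" wait deployment/percona-server-mysql-operator \
            --for=jsonpath='{.status.readyReplicas}'=1 --timeout=120s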
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | starting test step 1-deploy-pmm-server
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | running command: [sh -c set -o errexit
        set -o xtrace
        
        source ../../functions
        
        deploy_pmm_server
        sleep 30 # wait for PMM Server to start
        
        API_KEY=$(get_pmm_api_key)
        kubectl patch -n "${NAMESPACE}" secret test-secrets --type merge --patch '{"stringData": {"pmmserverkey": '$API_KEY'}}']
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | + source ../../functions
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ realpath ../../..
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-428
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | ++++ pwd
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/tests/monitoring
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | ++ test_name=monitoring
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/vars.sh
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-428
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-428
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/deploy
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/deploy
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/conf
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/conf
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ export GIT_BRANCH=PR-428
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ GIT_BRANCH=PR-428
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ export VERSION=PR-428-6555938f
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ VERSION=PR-428-6555938f
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-428-6555938f
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-428-6555938f
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ export PMM_SERVER_VERSION=9.9.9
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ PMM_SERVER_VERSION=9.9.9
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ export CERT_MANAGER_VER=1.15.1
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ CERT_MANAGER_VER=1.15.1
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | ++++ which gdate
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-428/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | ++++ which date
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ date=/usr/bin/date
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ command -v oc
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ kubectl get nodes
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | +++ grep '^minikube'
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | + deploy_pmm_server
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | + [[ -n '' ]]
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | + helm install monitoring -n kuttl-test-precious-dragon --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-428/kubeconfig
    logger.go:42: 10:32:09 | monitoring/1-deploy-pmm-server | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-428/kubeconfig
    logger.go:42: 10:32:14 | monitoring/1-deploy-pmm-server | NAME: monitoring
    logger.go:42: 10:32:14 | monitoring/1-deploy-pmm-server | LAST DEPLOYED: Wed Aug  7 10:32:10 2024
    logger.go:42: 10:32:14 | monitoring/1-deploy-pmm-server | NAMESPACE: kuttl-test-precious-dragon
    logger.go:42: 10:32:14 | monitoring/1-deploy-pmm-server | STATUS: deployed
    logger.go:42: 10:32:14 | monitoring/1-deploy-pmm-server | REVISION: 1
    logger.go:42: 10:32:14 | monitoring/1-deploy-pmm-server | TEST SUITE: None
    logger.go:42: 10:32:14 | monitoring/1-deploy-pmm-server | NOTES:
    logger.go:42: 10:32:14 | monitoring/1-deploy-pmm-server | PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster:
    logger.go:42: 10:32:14 | monitoring/1-deploy-pmm-server | 
    logger.go:42: 10:32:14 | monitoring/1-deploy-pmm-server | endpoint: https://monitoring-service.kuttl-test-precious-dragon.svc.cluster.local:443
    logger.go:42: 10:32:14 | monitoring/1-deploy-pmm-server | login:    admin
    logger.go:42: 10:32:14 | monitoring/1-deploy-pmm-server | password: admin
    logger.go:42: 10:32:14 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-precious-dragon exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
    logger.go:42: 10:32:15 | monitoring/1-deploy-pmm-server | Error from server (BadRequest): pod monitoring-0 does not have a host assigned
    logger.go:42: 10:32:15 | monitoring/1-deploy-pmm-server | + echo 'Retry '
    logger.go:42: 10:32:15 | monitoring/1-deploy-pmm-server | Retry 
    logger.go:42: 10:32:15 | monitoring/1-deploy-pmm-server | + sleep 5
    logger.go:42: 10:32:20 | monitoring/1-deploy-pmm-server | + let retry+=1
    logger.go:42: 10:32:20 | monitoring/1-deploy-pmm-server | + '[' 1 -ge 20 ']'
    logger.go:42: 10:32:20 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-precious-dragon exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
    logger.go:42: 10:32:21 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
    logger.go:42: 10:32:21 | monitoring/1-deploy-pmm-server | + echo 'Retry 1'
    logger.go:42: 10:32:21 | monitoring/1-deploy-pmm-server | Retry 1
    logger.go:42: 10:32:21 | monitoring/1-deploy-pmm-server | + sleep 5
    logger.go:42: 10:32:26 | monitoring/1-deploy-pmm-server | + let retry+=1
    logger.go:42: 10:32:26 | monitoring/1-deploy-pmm-server | + '[' 2 -ge 20 ']'
    logger.go:42: 10:32:26 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-precious-dragon exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
    logger.go:42: 10:32:27 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
    logger.go:42: 10:32:27 | monitoring/1-deploy-pmm-server | + echo 'Retry 2'
    logger.go:42: 10:32:27 | monitoring/1-deploy-pmm-server | Retry 2
    logger.go:42: 10:32:27 | monitoring/1-deploy-pmm-server | + sleep 5
    logger.go:42: 10:32:32 | monitoring/1-deploy-pmm-server | + let retry+=1
    logger.go:42: 10:32:32 | monitoring/1-deploy-pmm-server | + '[' 3 -ge 20 ']'
    logger.go:42: 10:32:32 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-precious-dragon exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
    logger.go:42: 10:32:33 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
    logger.go:42: 10:32:33 | monitoring/1-deploy-pmm-server | + echo 'Retry 3'
    logger.go:42: 10:32:33 | monitoring/1-deploy-pmm-server | Retry 3
    logger.go:42: 10:32:33 | monitoring/1-deploy-pmm-server | + sleep 5
    logger.go:42: 10:32:38 | monitoring/1-deploy-pmm-server | + let retry+=1
    logger.go:42: 10:32:38 | monitoring/1-deploy-pmm-server | + '[' 4 -ge 20 ']'
    logger.go:42: 10:32:38 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-precious-dragon exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
    logger.go:42: 10:32:39 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
    logger.go:42: 10:32:39 | monitoring/1-deploy-pmm-server | + echo 'Retry 4'
    logger.go:42: 10:32:39 | monitoring/1-deploy-pmm-server | Retry 4
    logger.go:42: 10:32:39 | monitoring/1-deploy-pmm-server | + sleep 5
    logger.go:42: 10:32:44 | monitoring/1-deploy-pmm-server | + let retry+=1
    logger.go:42: 10:32:44 | monitoring/1-deploy-pmm-server | + '[' 5 -ge 20 ']'
    logger.go:42: 10:32:44 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-precious-dragon exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
    logger.go:42: 10:32:45 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
    logger.go:42: 10:32:45 | monitoring/1-deploy-pmm-server | + echo 'Retry 5'
    logger.go:42: 10:32:45 | monitoring/1-deploy-pmm-server | Retry 5
    logger.go:42: 10:32:45 | monitoring/1-deploy-pmm-server | + sleep 5
    logger.go:42: 10:32:50 | monitoring/1-deploy-pmm-server | + let retry+=1
    logger.go:42: 10:32:50 | monitoring/1-deploy-pmm-server | + '[' 6 -ge 20 ']'
    logger.go:42: 10:32:50 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-precious-dragon exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
    logger.go:42: 10:32:51 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
    logger.go:42: 10:32:51 | monitoring/1-deploy-pmm-server | + echo 'Retry 6'
    logger.go:42: 10:32:51 | monitoring/1-deploy-pmm-server | Retry 6
    logger.go:42: 10:32:51 | monitoring/1-deploy-pmm-server | + sleep 5
    logger.go:42: 10:32:56 | monitoring/1-deploy-pmm-server | + let retry+=1
    logger.go:42: 10:32:56 | monitoring/1-deploy-pmm-server | + '[' 7 -ge 20 ']'
    logger.go:42: 10:32:56 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-precious-dragon exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
    logger.go:42: 10:32:57 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
    logger.go:42: 10:32:57 | monitoring/1-deploy-pmm-server | + echo 'Retry 7'
    logger.go:42: 10:32:57 | monitoring/1-deploy-pmm-server | Retry 7
    logger.go:42: 10:32:57 | monitoring/1-deploy-pmm-server | + sleep 5
    logger.go:42: 10:33:02 | monitoring/1-deploy-pmm-server | + let retry+=1
    logger.go:42: 10:33:02 | monitoring/1-deploy-pmm-server | + '[' 8 -ge 20 ']'
    logger.go:42: 10:33:02 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-precious-dragon exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
    logger.go:42: 10:33:03 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
    logger.go:42: 10:33:03 | monitoring/1-deploy-pmm-server | + echo 'Retry 8'
    logger.go:42: 10:33:03 | monitoring/1-deploy-pmm-server | Retry 8
    logger.go:42: 10:33:03 | monitoring/1-deploy-pmm-server | + sleep 5
    logger.go:42: 10:33:08 | monitoring/1-deploy-pmm-server | + let retry+=1
    logger.go:42: 10:33:08 | monitoring/1-deploy-pmm-server | + '[' 9 -ge 20 ']'
    logger.go:42: 10:33:08 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-precious-dragon exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
    logger.go:42: 10:33:09 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
    logger.go:42: 10:33:09 | monitoring/1-deploy-pmm-server | + echo 'Retry 9'
    logger.go:42: 10:33:09 | monitoring/1-deploy-pmm-server | Retry 9
    logger.go:42: 10:33:09 | monitoring/1-deploy-pmm-server | + sleep 5
    logger.go:42: 10:33:14 | monitoring/1-deploy-pmm-server | + let retry+=1
    logger.go:42: 10:33:14 | monitoring/1-deploy-pmm-server | + '[' 10 -ge 20 ']'
    logger.go:42: 10:33:14 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-precious-dragon exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
    logger.go:42: 10:33:16 | monitoring/1-deploy-pmm-server | + sleep 30
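The loop above waits until the PMM server container is actually running postgres before the step proceeds. Condensed, with the retry limit and sleep interval as traced:

        retry=0
        until kubectl -n "${NAMESPACE}" exec monitoring-0 -- \
                bash -c 'ls -l /proc/*/exe 2>/dev/null | grep postgres >/dev/null'; do
            retry=$((retry + 1))
            [ "$retry" -ge 20 ] && { echo "PMM server did not start" >&2; exit 1; }
            sleep 5
        done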
    logger.go:42: 10:33:46 | monitoring/1-deploy-pmm-server | ++ get_pmm_api_key
    logger.go:42: 10:33:46 | monitoring/1-deploy-pmm-server | ++ local key_name=
    logger.go:42: 10:33:46 | monitoring/1-deploy-pmm-server | ++ [[ -z '' ]]
    logger.go:42: 10:33:46 | monitoring/1-deploy-pmm-server | ++ key_name=operator
    logger.go:42: 10:33:46 | monitoring/1-deploy-pmm-server | ++ local ADMIN_PASSWORD
    logger.go:42: 10:33:46 | monitoring/1-deploy-pmm-server | +++ kubectl -n kuttl-test-precious-dragon exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2'
    logger.go:42: 10:33:47 | monitoring/1-deploy-pmm-server | ++ ADMIN_PASSWORD=admin
    logger.go:42: 10:33:47 | monitoring/1-deploy-pmm-server | ++ jq .key
    logger.go:42: 10:33:47 | monitoring/1-deploy-pmm-server | +++ get_service_ip monitoring-service
    logger.go:42: 10:33:47 | monitoring/1-deploy-pmm-server | +++ local service=monitoring-service
    logger.go:42: 10:33:47 | monitoring/1-deploy-pmm-server | +++ kubectl get service/monitoring-service -n kuttl-test-precious-dragon -o 'jsonpath={.spec.type}'
    logger.go:42: 10:33:47 | monitoring/1-deploy-pmm-server | +++ grep -q NotFound
    logger.go:42: 10:33:47 | monitoring/1-deploy-pmm-server | ++++ kubectl get service/monitoring-service -n kuttl-test-precious-dragon -o 'jsonpath={.spec.type}'
    logger.go:42: 10:33:48 | monitoring/1-deploy-pmm-server | +++ '[' LoadBalancer = ClusterIP ']'
    logger.go:42: 10:33:48 | monitoring/1-deploy-pmm-server | +++ kubectl get service/monitoring-service -n kuttl-test-precious-dragon -o 'jsonpath={.status.loadBalancer.ingress[]}'
    logger.go:42: 10:33:48 | monitoring/1-deploy-pmm-server | +++ egrep -q 'hostname|ip'
    logger.go:42: 10:33:48 | monitoring/1-deploy-pmm-server | +++ kubectl get service/monitoring-service -n kuttl-test-precious-dragon -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
    logger.go:42: 10:33:49 | monitoring/1-deploy-pmm-server | +++ kubectl get service/monitoring-service -n kuttl-test-precious-dragon -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
    logger.go:42: 10:33:49 | monitoring/1-deploy-pmm-server | ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@34.133.178.156/graph/api/auth/keys
    logger.go:42: 10:33:49 | monitoring/1-deploy-pmm-server | + API_KEY='"eyJrIjoiZDRucU81VzRTVkdBNmZvc1diRk1FUTl5UzlkbUZ5dGciLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="'
    logger.go:42: 10:33:49 | monitoring/1-deploy-pmm-server | + kubectl patch -n kuttl-test-precious-dragon secret test-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoiZDRucU81VzRTVkdBNmZvc1diRk1FUTl5UzlkbUZ5dGciLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}'
    logger.go:42: 10:33:50 | monitoring/1-deploy-pmm-server | secret/test-secrets patched
[controller-runtime] log.SetLogger(...) was never called; logs will not be displayed.
Detected at:
	>  goroutine 20 [running]:
	>  runtime/debug.Stack()
	>  	/nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/runtime/debug/stack.go:24 +0x5e
	>  sigs.k8s.io/controller-runtime/pkg/log.eventuallyFulfillRoot()
	>  	/home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/log.go:60 +0xcd
	>  sigs.k8s.io/controller-runtime/pkg/log.(*delegatingLogSink).WithName(0xc0002a9c00, {0x184a055, 0x14})
	>  	/home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/deleg.go:147 +0x3e
	>  github.com/go-logr/logr.Logger.WithName({{0x1acb7d8, 0xc0002a9c00}, 0x0}, {0x184a055?, 0xc00066df80?})
	>  	/home/mowsiany/go/pkg/mod/github.com/go-logr/logr@v1.2.4/logr.go:336 +0x36
	>  sigs.k8s.io/controller-runtime/pkg/client.newClient(0x131ead3?, {0x0, 0xc00043ca10, {0x1accd90, 0xc0002a8180}, 0x0, {0x0, 0x0}, 0x0})
	>  	/home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:122 +0xf1
	>  sigs.k8s.io/controller-runtime/pkg/client.New(0xc0000ec008?, {0x0, 0xc00043ca10, {0x1accd90, 0xc0002a8180}, 0x0, {0x0, 0x0}, 0x0})
	>  	/home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:103 +0x7d
	>  github.com/kudobuilder/kuttl/pkg/test/utils.NewRetryClient(0xc0000ec008, {0x0, 0xc00043ca10, {0x1accd90, 0xc0002a8180}, 0x0, {0x0, 0x0}, 0x0})
	>  	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/utils/kubernetes.go:177 +0x127
	>  github.com/kudobuilder/kuttl/pkg/test.(*Harness).Client(0xc0004e8608, 0x3?)
	>  	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:323 +0x18e
	>  github.com/kudobuilder/kuttl/pkg/test.(*Step).Create(0xc00022b040, 0xc0004124e0, {0xc000501f80, 0x1a})
	>  	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:177 +0x63
	>  github.com/kudobuilder/kuttl/pkg/test.(*Step).Run(0xc00022b040, 0xc0004124e0, {0xc000501f80, 0x1a})
	>  	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:457 +0x24a
	>  github.com/kudobuilder/kuttl/pkg/test.(*Case).Run(0xc00030dd60, 0xc0004124e0, 0xc000032510)
	>  	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/case.go:373 +0xaeb
	>  github.com/kudobuilder/kuttl/pkg/test.(*Harness).RunTests.func1.1(0xc0004124e0)
	>  	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:401 +0x12e
	>  testing.tRunner(0xc0004124e0, 0xc00040c738)
	>  	/nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1689 +0xfb
	>  created by testing.(*T).Run in goroutine 19
	>  	/nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1742 +0x390
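The controller-runtime message above is emitted by the kuttl harness itself, which builds a controller-runtime client without calling log.SetLogger first; it is a known benign warning, not a failure of the test step.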
    logger.go:42: 10:33:51 | monitoring/1-deploy-pmm-server | test step completed 1-deploy-pmm-server
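get_pmm_api_key, as traced above, resolves the monitoring-service LoadBalancer address and creates a Grafana API key using the admin password read from the pod. A condensed sketch (PMM_IP is a local name introduced here; /graph is PMM's Grafana mount point, and --insecure matches the traced call):

        PMM_IP=$(kubectl -n "${NAMESPACE}" get service/monitoring-service \
            -o jsonpath='{.status.loadBalancer.ingress[].ip}')
        curl --insecure -s -X POST -H 'Content-Type: application/json' \
            -d '{"name":"operator", "role": "Admin"}' \
            "https://admin:${ADMIN_PASSWORD}@${PMM_IP}/graph/api/auth/keys" | jq .key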
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | starting test step 2-create-cluster
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | running command: [sh -c set -o errexit
        set -o xtrace
        
        source ../../functions
        
        get_cr \
          | yq eval '.spec.mysql.clusterType="async"' - \
        	| yq eval '.spec.pmm.enabled = true' - \
        	| yq eval '.spec.proxy.haproxy.enabled = true' - \
        	| yq eval '.spec.proxy.haproxy.expose.type = "LoadBalancer"' - \
        	| kubectl -n "${NAMESPACE}" apply -f -]
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + source ../../functions
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ realpath ../../..
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-428
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | ++++ pwd
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/tests/monitoring
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | ++ test_name=monitoring
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/vars.sh
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-428
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-428
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/deploy
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/deploy
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/conf
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/conf
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ export GIT_BRANCH=PR-428
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ GIT_BRANCH=PR-428
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ export VERSION=PR-428-6555938f
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ VERSION=PR-428-6555938f
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-428-6555938f
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-428-6555938f
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ export PMM_SERVER_VERSION=9.9.9
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ PMM_SERVER_VERSION=9.9.9
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ export CERT_MANAGER_VER=1.15.1
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ CERT_MANAGER_VER=1.15.1
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | ++++ which gdate
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-428/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | ++++ which date
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ date=/usr/bin/date
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ command -v oc
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ kubectl get nodes
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | +++ grep '^minikube'
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + yq eval '.spec.pmm.enabled = true' -
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + yq eval '.spec.proxy.haproxy.expose.type = "LoadBalancer"' -
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + get_cr
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + local name_suffix=
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + yq eval '.spec.proxy.haproxy.enabled = true' -
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + kubectl -n kuttl-test-precious-dragon apply -f -
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + yq eval '.spec.secretsName="test-secrets"' -
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' -
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' -
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + '[' -n '' ']'
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + yq eval -
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | ++ printf '.metadata.name="%s"' monitoring
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:dev-latest
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + yq eval '.metadata.name="monitoring"' /mnt/jenkins/workspace/cloud-ps-operator_PR-428/deploy/cr.yaml
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:dev-latest"' -
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' -
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' -
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-428-6555938f
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-428-6555938f"' -
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 10:33:51 | monitoring/2-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' -
    logger.go:42: 10:33:52 | monitoring/2-create-cluster | perconaservermysql.ps.percona.com/monitoring created
    logger.go:42: 10:37:35 | monitoring/2-create-cluster | test step completed 2-create-cluster
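While the step assert waits (the roughly four-minute gap before completion above), the cluster's progress can be followed on the custom resource itself; a minimal sketch, assuming the ps short name and the .status.state field of the ps.percona.com CRD:

        kubectl -n "${NAMESPACE}" get ps monitoring -o jsonpath='{.status.state}'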
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | starting test step 3-rotate-pmm-key
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | running command: [sh -c set -o errexit
        set -o xtrace
        
        source ../../functions
        
        # add new PMM API key to secret
        API_KEY_NEW=$(get_pmm_api_key "operator-new")
        kubectl patch -n "${NAMESPACE}" secret test-secrets --type merge --patch '{"stringData": {"pmmserverkey": '$API_KEY_NEW'}}'
        
        # delete old PMM key
        delete_pmm_api_key "operator"
        sleep 10]
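The rotation is two Grafana API calls: create an "operator-new" key (via get_pmm_api_key, traced next) and delete the old "operator" key by id. A sketch of delete_pmm_api_key condensed from the trace below; the final DELETE falls outside this excerpt, so its exact form is inferred from the standard Grafana HTTP API (PMM_IP as above):

        key_id=$(curl --insecure -s "https://admin:${ADMIN_PASSWORD}@${PMM_IP}/graph/api/auth/keys" \
            | jq '.[] | select(.name == "operator").id')
        curl --insecure -s -X DELETE \
            "https://admin:${ADMIN_PASSWORD}@${PMM_IP}/graph/api/auth/keys/${key_id}"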
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | + source ../../functions
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ realpath ../../..
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-428
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | ++++ pwd
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/tests/monitoring
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | ++ test_name=monitoring
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/vars.sh
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-428
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-428
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/deploy
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/deploy
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/conf
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/conf
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ export GIT_BRANCH=PR-428
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ GIT_BRANCH=PR-428
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ export VERSION=PR-428-6555938f
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ VERSION=PR-428-6555938f
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-428-6555938f
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-428-6555938f
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ export PMM_SERVER_VERSION=9.9.9
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ PMM_SERVER_VERSION=9.9.9
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ export CERT_MANAGER_VER=1.15.1
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ CERT_MANAGER_VER=1.15.1
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | ++++ which gdate
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-428/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | ++++ which date
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ date=/usr/bin/date
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ command -v oc
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ kubectl get nodes
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ grep '^minikube'
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | ++ get_pmm_api_key operator-new
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | ++ local key_name=operator-new
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | ++ [[ -z operator-new ]]
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | ++ local ADMIN_PASSWORD
    logger.go:42: 10:37:35 | monitoring/3-rotate-pmm-key | +++ kubectl -n kuttl-test-precious-dragon exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2'
    logger.go:42: 10:37:37 | monitoring/3-rotate-pmm-key | ++ ADMIN_PASSWORD=admin
    logger.go:42: 10:37:37 | monitoring/3-rotate-pmm-key | ++ jq .key
    logger.go:42: 10:37:37 | monitoring/3-rotate-pmm-key | +++ get_service_ip monitoring-service
    logger.go:42: 10:37:37 | monitoring/3-rotate-pmm-key | +++ local service=monitoring-service
    logger.go:42: 10:37:37 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-precious-dragon -o 'jsonpath={.spec.type}'
    logger.go:42: 10:37:37 | monitoring/3-rotate-pmm-key | +++ grep -q NotFound
    logger.go:42: 10:37:37 | monitoring/3-rotate-pmm-key | ++++ kubectl get service/monitoring-service -n kuttl-test-precious-dragon -o 'jsonpath={.spec.type}'
    logger.go:42: 10:37:38 | monitoring/3-rotate-pmm-key | +++ '[' LoadBalancer = ClusterIP ']'
    logger.go:42: 10:37:38 | monitoring/3-rotate-pmm-key | +++ egrep -q 'hostname|ip'
    logger.go:42: 10:37:38 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-precious-dragon -o 'jsonpath={.status.loadBalancer.ingress[]}'
    logger.go:42: 10:37:38 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-precious-dragon -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
    logger.go:42: 10:37:38 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-precious-dragon -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
    logger.go:42: 10:37:39 | monitoring/3-rotate-pmm-key | ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator-new", "role": "Admin"}' https://admin:admin@34.133.178.156/graph/api/auth/keys
    logger.go:42: 10:37:39 | monitoring/3-rotate-pmm-key | + API_KEY_NEW='"eyJrIjoiWldudUxQQ2ZwbXVKcVZzQzdpdUhRSnJVaktQWUVyNVUiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9"'
    logger.go:42: 10:37:39 | monitoring/3-rotate-pmm-key | + kubectl patch -n kuttl-test-precious-dragon secret test-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoiWldudUxQQ2ZwbXVKcVZzQzdpdUhRSnJVaktQWUVyNVUiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9"}}'
    logger.go:42: 10:37:40 | monitoring/3-rotate-pmm-key | secret/test-secrets patched
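The rotation above reduces to two operations: mint a replacement Grafana API key through PMM's /graph/api/auth/keys endpoint, then patch the cluster secret so the operator propagates the new key to the pmm-client containers. A minimal sketch of the same flow, assuming ADMIN_PASSWORD, NAMESPACE, and a PMM_IP already resolved by get_service_ip are in scope:

    # create a new Admin-role API key and store it in the cluster secret
    API_KEY_NEW=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
        -d '{"name":"operator-new", "role": "Admin"}' \
        "https://admin:${ADMIN_PASSWORD}@${PMM_IP}/graph/api/auth/keys" | jq -r .key)
    kubectl patch -n "${NAMESPACE}" secret test-secrets --type merge \
        --patch "{\"stringData\": {\"pmmserverkey\": \"${API_KEY_NEW}\"}}"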
    logger.go:42: 10:37:40 | monitoring/3-rotate-pmm-key | + delete_pmm_api_key operator
    logger.go:42: 10:37:40 | monitoring/3-rotate-pmm-key | + local key_name=operator
    logger.go:42: 10:37:40 | monitoring/3-rotate-pmm-key | + [[ -z operator ]]
    logger.go:42: 10:37:40 | monitoring/3-rotate-pmm-key | + local ADMIN_PASSWORD
    logger.go:42: 10:37:40 | monitoring/3-rotate-pmm-key | ++ kubectl -n kuttl-test-precious-dragon exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2'
    logger.go:42: 10:37:41 | monitoring/3-rotate-pmm-key | + ADMIN_PASSWORD=admin
    logger.go:42: 10:37:41 | monitoring/3-rotate-pmm-key | + local key_id
    logger.go:42: 10:37:41 | monitoring/3-rotate-pmm-key | ++ jq '.[] | select( .name == "operator").id'
    logger.go:42: 10:37:41 | monitoring/3-rotate-pmm-key | +++ get_service_ip monitoring-service
    logger.go:42: 10:37:41 | monitoring/3-rotate-pmm-key | +++ local service=monitoring-service
    logger.go:42: 10:37:41 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-precious-dragon -o 'jsonpath={.spec.type}'
    logger.go:42: 10:37:41 | monitoring/3-rotate-pmm-key | +++ grep -q NotFound
    logger.go:42: 10:37:41 | monitoring/3-rotate-pmm-key | ++++ kubectl get service/monitoring-service -n kuttl-test-precious-dragon -o 'jsonpath={.spec.type}'
    logger.go:42: 10:37:42 | monitoring/3-rotate-pmm-key | +++ '[' LoadBalancer = ClusterIP ']'
    logger.go:42: 10:37:42 | monitoring/3-rotate-pmm-key | +++ egrep -q 'hostname|ip'
    logger.go:42: 10:37:42 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-precious-dragon -o 'jsonpath={.status.loadBalancer.ingress[]}'
    logger.go:42: 10:37:42 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-precious-dragon -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
    logger.go:42: 10:37:42 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-precious-dragon -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
    logger.go:42: 10:37:43 | monitoring/3-rotate-pmm-key | ++ curl --insecure -X GET https://admin:admin@34.133.178.156/graph/api/auth/keys
    logger.go:42: 10:37:43 | monitoring/3-rotate-pmm-key | + key_id=1
    logger.go:42: 10:37:43 | monitoring/3-rotate-pmm-key | ++ get_service_ip monitoring-service
    logger.go:42: 10:37:43 | monitoring/3-rotate-pmm-key | ++ local service=monitoring-service
    logger.go:42: 10:37:43 | monitoring/3-rotate-pmm-key | ++ kubectl get service/monitoring-service -n kuttl-test-precious-dragon -o 'jsonpath={.spec.type}'
    logger.go:42: 10:37:43 | monitoring/3-rotate-pmm-key | ++ grep -q NotFound
    logger.go:42: 10:37:44 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-precious-dragon -o 'jsonpath={.spec.type}'
    logger.go:42: 10:37:44 | monitoring/3-rotate-pmm-key | ++ '[' LoadBalancer = ClusterIP ']'
    logger.go:42: 10:37:44 | monitoring/3-rotate-pmm-key | ++ egrep -q 'hostname|ip'
    logger.go:42: 10:37:44 | monitoring/3-rotate-pmm-key | ++ kubectl get service/monitoring-service -n kuttl-test-precious-dragon -o 'jsonpath={.status.loadBalancer.ingress[]}'
    logger.go:42: 10:37:44 | monitoring/3-rotate-pmm-key | ++ kubectl get service/monitoring-service -n kuttl-test-precious-dragon -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
    logger.go:42: 10:37:45 | monitoring/3-rotate-pmm-key | ++ kubectl get service/monitoring-service -n kuttl-test-precious-dragon -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
    logger.go:42: 10:37:45 | monitoring/3-rotate-pmm-key | + curl --insecure -X DELETE https://admin:admin@34.133.178.156/graph/api/auth/keys/1
    logger.go:42: 10:37:46 | monitoring/3-rotate-pmm-key | {"message":"API key deleted"}
    logger.go:42: 10:37:46 | monitoring/3-rotate-pmm-key | + sleep 10
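Deleting the old key follows the same pattern: list the existing keys, select the id of the one named "operator", and issue a DELETE. Condensed from the trace above (PMM_IP again stands in for the LoadBalancer address):

    # look up the id of the old "operator" key and delete it
    key_id=$(curl --insecure -s -X GET \
        "https://admin:${ADMIN_PASSWORD}@${PMM_IP}/graph/api/auth/keys" \
        | jq '.[] | select(.name == "operator").id')
    curl --insecure -s -X DELETE \
        "https://admin:${ADMIN_PASSWORD}@${PMM_IP}/graph/api/auth/keys/${key_id}"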
    logger.go:42: 10:41:44 | monitoring/3-rotate-pmm-key | test step completed 3-rotate-pmm-key
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | starting test step 4-check-metrics
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | running command: [sh -c set -o errexit
        set -o xtrace
        
        source ../../functions
        
        sleep 70 # we should wait more than one minute because `get_metric_values` gets data for the last 60 seconds
        
        API_KEY=$(kubectl get secret internal-monitoring -o jsonpath='{.data.pmmserverkey}' -n "${NAMESPACE}" | base64 --decode)
        
        for i in $(seq 0 2); do
            get_metric_values node_boot_time_seconds ${NAMESPACE}-$(get_cluster_name)-mysql-${i} api_key:$API_KEY
            get_metric_values mysql_global_status_uptime ${NAMESPACE}-$(get_cluster_name)-mysql-${i} api_key:$API_KEY
        done
        
        sleep 90 # wait for QAN
        
        get_qan20_values monitoring-mysql-0 api_key:$API_KEY
        
        haproxy_svc=$(get_service_ip "monitoring-haproxy")
        http_code=$(curl -s -o /dev/null -w "%{http_code}" http://${haproxy_svc}:8404/metrics)
        if [[ $http_code != 200 ]]; then
            echo "Error: http code is $http_code"
            exit 1
        fi]
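This step leans on get_metric_values, whose body is hidden behind set +o xtrace in the trace that follows; only the one-minute start/end window computed with date is visible. Judging from that window, the helper most plausibly issues a Prometheus-style range query against PMM. The sketch below is a hypothetical reconstruction: the /prometheus path, the node_name label, and the jq extraction are assumptions, not taken from the trace.

    # hypothetical: fetch one metric for one instance over the last minute
    start=$(/usr/bin/date -u +%s -d '-1 minute'); end=$(/usr/bin/date -u +%s)
    curl -s -k -G "https://api_key:${API_KEY}@${PMM_IP}/prometheus/api/v1/query_range" \
        --data-urlencode "query=node_boot_time_seconds{node_name=\"${instance}\"}" \
        --data-urlencode "start=${start}" --data-urlencode "end=${end}" \
        --data-urlencode "step=60" | jq '.data.result[0].values[-1][1]'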
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | + source ../../functions
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ realpath ../../..
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-428
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | ++++ pwd
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/tests/monitoring
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | ++ test_name=monitoring
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/vars.sh
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-428
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-428
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/deploy
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/deploy
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/conf
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/conf
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ export GIT_BRANCH=PR-428
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ GIT_BRANCH=PR-428
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ export VERSION=PR-428-6555938f
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ VERSION=PR-428-6555938f
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-428-6555938f
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-428-6555938f
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ export PMM_SERVER_VERSION=9.9.9
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ PMM_SERVER_VERSION=9.9.9
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ export CERT_MANAGER_VER=1.15.1
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ CERT_MANAGER_VER=1.15.1
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | ++++ which gdate
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-428/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | ++++ which date
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ date=/usr/bin/date
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ command -v oc
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ kubectl get nodes
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | +++ grep '^minikube'
    logger.go:42: 10:41:44 | monitoring/4-check-metrics | + sleep 70
    logger.go:42: 10:42:54 | monitoring/4-check-metrics | ++ kubectl get secret internal-monitoring -o 'jsonpath={.data.pmmserverkey}' -n kuttl-test-precious-dragon
    logger.go:42: 10:42:54 | monitoring/4-check-metrics | ++ base64 --decode
    logger.go:42: 10:42:55 | monitoring/4-check-metrics | + API_KEY=eyJrIjoiWldudUxQQ2ZwbXVKcVZzQzdpdUhRSnJVaktQWUVyNVUiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9
    logger.go:42: 10:42:55 | monitoring/4-check-metrics | ++ seq 0 2
    logger.go:42: 10:42:55 | monitoring/4-check-metrics | + for i in '$(seq 0 2)'
    logger.go:42: 10:42:55 | monitoring/4-check-metrics | ++ get_cluster_name
    logger.go:42: 10:42:55 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-precious-dragon get ps -o 'jsonpath={.items[0].metadata.name}'
    logger.go:42: 10:42:55 | monitoring/4-check-metrics | + get_metric_values node_boot_time_seconds kuttl-test-precious-dragon-monitoring-mysql-0 api_key:eyJrIjoiWldudUxQQ2ZwbXVKcVZzQzdpdUhRSnJVaktQWUVyNVUiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9
    logger.go:42: 10:42:55 | monitoring/4-check-metrics | + local metric=node_boot_time_seconds
    logger.go:42: 10:42:55 | monitoring/4-check-metrics | + local instance=kuttl-test-precious-dragon-monitoring-mysql-0
    logger.go:42: 10:42:55 | monitoring/4-check-metrics | + local user_pass=api_key:eyJrIjoiWldudUxQQ2ZwbXVKcVZzQzdpdUhRSnJVaktQWUVyNVUiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9
    logger.go:42: 10:42:55 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute'
    logger.go:42: 10:42:55 | monitoring/4-check-metrics | + local start=1723027315
    logger.go:42: 10:42:55 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s
    logger.go:42: 10:42:55 | monitoring/4-check-metrics | + local end=1723027375
    logger.go:42: 10:42:55 | monitoring/4-check-metrics | + set +o xtrace
    logger.go:42: 10:42:57 | monitoring/4-check-metrics | "1723024150"
    logger.go:42: 10:42:57 | monitoring/4-check-metrics | "1723024150"
    logger.go:42: 10:42:57 | monitoring/4-check-metrics | ++ get_cluster_name
    logger.go:42: 10:42:57 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-precious-dragon get ps -o 'jsonpath={.items[0].metadata.name}'
    logger.go:42: 10:42:57 | monitoring/4-check-metrics | + get_metric_values mysql_global_status_uptime kuttl-test-precious-dragon-monitoring-mysql-0 api_key:eyJrIjoiWldudUxQQ2ZwbXVKcVZzQzdpdUhRSnJVaktQWUVyNVUiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9
    logger.go:42: 10:42:57 | monitoring/4-check-metrics | + local metric=mysql_global_status_uptime
    logger.go:42: 10:42:57 | monitoring/4-check-metrics | + local instance=kuttl-test-precious-dragon-monitoring-mysql-0
    logger.go:42: 10:42:57 | monitoring/4-check-metrics | + local user_pass=api_key:eyJrIjoiWldudUxQQ2ZwbXVKcVZzQzdpdUhRSnJVaktQWUVyNVUiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9
    logger.go:42: 10:42:57 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute'
    logger.go:42: 10:42:57 | monitoring/4-check-metrics | + local start=1723027317
    logger.go:42: 10:42:57 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s
    logger.go:42: 10:42:57 | monitoring/4-check-metrics | + local end=1723027377
    logger.go:42: 10:42:57 | monitoring/4-check-metrics | + set +o xtrace
    logger.go:42: 10:42:58 | monitoring/4-check-metrics | "203"
    logger.go:42: 10:42:58 | monitoring/4-check-metrics | "109"
    logger.go:42: 10:42:58 | monitoring/4-check-metrics | + for i in '$(seq 0 2)'
    logger.go:42: 10:42:58 | monitoring/4-check-metrics | ++ get_cluster_name
    logger.go:42: 10:42:58 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-precious-dragon get ps -o 'jsonpath={.items[0].metadata.name}'
    logger.go:42: 10:42:59 | monitoring/4-check-metrics | + get_metric_values node_boot_time_seconds kuttl-test-precious-dragon-monitoring-mysql-1 api_key:eyJrIjoiWldudUxQQ2ZwbXVKcVZzQzdpdUhRSnJVaktQWUVyNVUiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9
    logger.go:42: 10:42:59 | monitoring/4-check-metrics | + local metric=node_boot_time_seconds
    logger.go:42: 10:42:59 | monitoring/4-check-metrics | + local instance=kuttl-test-precious-dragon-monitoring-mysql-1
    logger.go:42: 10:42:59 | monitoring/4-check-metrics | + local user_pass=api_key:eyJrIjoiWldudUxQQ2ZwbXVKcVZzQzdpdUhRSnJVaktQWUVyNVUiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9
    logger.go:42: 10:42:59 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute'
    logger.go:42: 10:42:59 | monitoring/4-check-metrics | + local start=1723027319
    logger.go:42: 10:42:59 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s
    logger.go:42: 10:42:59 | monitoring/4-check-metrics | + local end=1723027379
    logger.go:42: 10:42:59 | monitoring/4-check-metrics | + set +o xtrace
    logger.go:42: 10:43:00 | monitoring/4-check-metrics | "1723024150"
    logger.go:42: 10:43:00 | monitoring/4-check-metrics | "1723024150"
    logger.go:42: 10:43:00 | monitoring/4-check-metrics | ++ get_cluster_name
    logger.go:42: 10:43:00 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-precious-dragon get ps -o 'jsonpath={.items[0].metadata.name}'
    logger.go:42: 10:43:01 | monitoring/4-check-metrics | + get_metric_values mysql_global_status_uptime kuttl-test-precious-dragon-monitoring-mysql-1 api_key:eyJrIjoiWldudUxQQ2ZwbXVKcVZzQzdpdUhRSnJVaktQWUVyNVUiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9
    logger.go:42: 10:43:01 | monitoring/4-check-metrics | + local metric=mysql_global_status_uptime
    logger.go:42: 10:43:01 | monitoring/4-check-metrics | + local instance=kuttl-test-precious-dragon-monitoring-mysql-1
    logger.go:42: 10:43:01 | monitoring/4-check-metrics | + local user_pass=api_key:eyJrIjoiWldudUxQQ2ZwbXVKcVZzQzdpdUhRSnJVaktQWUVyNVUiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9
    logger.go:42: 10:43:01 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute'
    logger.go:42: 10:43:01 | monitoring/4-check-metrics | + local start=1723027321
    logger.go:42: 10:43:01 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s
    logger.go:42: 10:43:01 | monitoring/4-check-metrics | + local end=1723027381
    logger.go:42: 10:43:01 | monitoring/4-check-metrics | + set +o xtrace
    logger.go:42: 10:43:02 | monitoring/4-check-metrics | "125"
    logger.go:42: 10:43:02 | monitoring/4-check-metrics | "273"
    logger.go:42: 10:43:02 | monitoring/4-check-metrics | + for i in '$(seq 0 2)'
    logger.go:42: 10:43:02 | monitoring/4-check-metrics | ++ get_cluster_name
    logger.go:42: 10:43:02 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-precious-dragon get ps -o 'jsonpath={.items[0].metadata.name}'
    logger.go:42: 10:43:03 | monitoring/4-check-metrics | + get_metric_values node_boot_time_seconds kuttl-test-precious-dragon-monitoring-mysql-2 api_key:eyJrIjoiWldudUxQQ2ZwbXVKcVZzQzdpdUhRSnJVaktQWUVyNVUiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9
    logger.go:42: 10:43:03 | monitoring/4-check-metrics | + local metric=node_boot_time_seconds
    logger.go:42: 10:43:03 | monitoring/4-check-metrics | + local instance=kuttl-test-precious-dragon-monitoring-mysql-2
    logger.go:42: 10:43:03 | monitoring/4-check-metrics | + local user_pass=api_key:eyJrIjoiWldudUxQQ2ZwbXVKcVZzQzdpdUhRSnJVaktQWUVyNVUiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9
    logger.go:42: 10:43:03 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute'
    logger.go:42: 10:43:03 | monitoring/4-check-metrics | + local start=1723027323
    logger.go:42: 10:43:03 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s
    logger.go:42: 10:43:03 | monitoring/4-check-metrics | + local end=1723027383
    logger.go:42: 10:43:03 | monitoring/4-check-metrics | + set +o xtrace
    logger.go:42: 10:43:04 | monitoring/4-check-metrics | "1723024150"
    logger.go:42: 10:43:04 | monitoring/4-check-metrics | "1723024150"
    logger.go:42: 10:43:04 | monitoring/4-check-metrics | ++ get_cluster_name
    logger.go:42: 10:43:04 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-precious-dragon get ps -o 'jsonpath={.items[0].metadata.name}'
    logger.go:42: 10:43:05 | monitoring/4-check-metrics | + get_metric_values mysql_global_status_uptime kuttl-test-precious-dragon-monitoring-mysql-2 api_key:eyJrIjoiWldudUxQQ2ZwbXVKcVZzQzdpdUhRSnJVaktQWUVyNVUiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9
    logger.go:42: 10:43:05 | monitoring/4-check-metrics | + local metric=mysql_global_status_uptime
    logger.go:42: 10:43:05 | monitoring/4-check-metrics | + local instance=kuttl-test-precious-dragon-monitoring-mysql-2
    logger.go:42: 10:43:05 | monitoring/4-check-metrics | + local user_pass=api_key:eyJrIjoiWldudUxQQ2ZwbXVKcVZzQzdpdUhRSnJVaktQWUVyNVUiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9
    logger.go:42: 10:43:05 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute'
    logger.go:42: 10:43:05 | monitoring/4-check-metrics | + local start=1723027325
    logger.go:42: 10:43:05 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s
    logger.go:42: 10:43:05 | monitoring/4-check-metrics | + local end=1723027385
    logger.go:42: 10:43:05 | monitoring/4-check-metrics | + set +o xtrace
    logger.go:42: 10:43:06 | monitoring/4-check-metrics | "54"
    logger.go:42: 10:43:06 | monitoring/4-check-metrics | "211"
    logger.go:42: 10:43:06 | monitoring/4-check-metrics | + sleep 90
    logger.go:42: 10:44:36 | monitoring/4-check-metrics | + get_qan20_values monitoring-mysql-0 api_key:eyJrIjoiWldudUxQQ2ZwbXVKcVZzQzdpdUhRSnJVaktQWUVyNVUiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9
    logger.go:42: 10:44:36 | monitoring/4-check-metrics | + local instance=monitoring-mysql-0
    logger.go:42: 10:44:36 | monitoring/4-check-metrics | + local user_pass=api_key:eyJrIjoiWldudUxQQ2ZwbXVKcVZzQzdpdUhRSnJVaktQWUVyNVUiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9
    logger.go:42: 10:44:36 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S -d '-30 minute'
    logger.go:42: 10:44:36 | monitoring/4-check-metrics | + local start=2024-08-07T10:14:36
    logger.go:42: 10:44:36 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S
    logger.go:42: 10:44:36 | monitoring/4-check-metrics | + local end=2024-08-07T10:44:36
    logger.go:42: 10:44:36 | monitoring/4-check-metrics | + local endpoint=monitoring-service
    logger.go:42: 10:44:36 | monitoring/4-check-metrics | ++ cat
    logger.go:42: 10:44:36 | monitoring/4-check-metrics | +++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z
    logger.go:42: 10:44:36 | monitoring/4-check-metrics | +++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z
    logger.go:42: 10:44:36 | monitoring/4-check-metrics | + local 'payload={
    logger.go:42: 10:44:36 | monitoring/4-check-metrics |    "columns":[
    logger.go:42: 10:44:36 | monitoring/4-check-metrics |       "load",
    logger.go:42: 10:44:36 | monitoring/4-check-metrics |       "num_queries",
    logger.go:42: 10:44:36 | monitoring/4-check-metrics |       "query_time"
    logger.go:42: 10:44:36 | monitoring/4-check-metrics |    ],
    logger.go:42: 10:44:36 | monitoring/4-check-metrics |    "first_seen": false,
    logger.go:42: 10:44:36 | monitoring/4-check-metrics |    "group_by": "queryid",
    logger.go:42: 10:44:36 | monitoring/4-check-metrics |    "include_only_fields": [],
    logger.go:42: 10:44:36 | monitoring/4-check-metrics |    "keyword": "",
    logger.go:42: 10:44:36 | monitoring/4-check-metrics |    "labels": [
    logger.go:42: 10:44:36 | monitoring/4-check-metrics |        {
    logger.go:42: 10:44:36 | monitoring/4-check-metrics |            "key": "cluster",
    logger.go:42: 10:44:36 | monitoring/4-check-metrics |            "value": ["monitoring"]
    logger.go:42: 10:44:36 | monitoring/4-check-metrics |    }],
    logger.go:42: 10:44:36 | monitoring/4-check-metrics |    "limit": 10,
    logger.go:42: 10:44:36 | monitoring/4-check-metrics |    "offset": 0,
    logger.go:42: 10:44:36 | monitoring/4-check-metrics |    "order_by": "-load",
    logger.go:42: 10:44:36 | monitoring/4-check-metrics |    "main_metric": "load",
    logger.go:42: 10:44:36 | monitoring/4-check-metrics |    "period_start_from": "2024-08-06T22:44:36+00:00",
    logger.go:42: 10:44:36 | monitoring/4-check-metrics |    "period_start_to": "2024-08-07T10:44:36+00:00"
    logger.go:42: 10:44:36 | monitoring/4-check-metrics | }'
    logger.go:42: 10:44:36 | monitoring/4-check-metrics | + jq '.rows[].fingerprint'
    logger.go:42: 10:44:36 | monitoring/4-check-metrics | ++ sed 's/\n//g'
    logger.go:42: 10:44:36 | monitoring/4-check-metrics | ++ echo '{' '"columns":[' '"load",' '"num_queries",' '"query_time"' '],' '"first_seen":' false, '"group_by":' '"queryid",' '"include_only_fields":' '[],' '"keyword":' '"",' '"labels":' '[' '{' '"key":' '"cluster",' '"value":' '["monitoring"]' '}],' '"limit":' 10, '"offset":' 0, '"order_by":' '"-load",' '"main_metric":' '"load",' '"period_start_from":' '"2024-08-06T22:44:36+00:00",' '"period_start_to":' '"2024-08-07T10:44:36+00:00"' '}'
    logger.go:42: 10:44:36 | monitoring/4-check-metrics | + run_curl -XPOST -d ''\''{ "columns":[ "load", "num_queries", "query_time" ], "first_seen": false, "group_by": "queryid", "include_only_fields": [], "keyword": "", "labels": [ { "key": "cluster", "value": ["monitoring"] }], "limit": 10, "offset": 0, "order_by": "-load", "main_metric": "load", "period_start_from": "2024-08-06T22:44:36+00:00", "period_start_to": "2024-08-07T10:44:36+00:00" }'\''' https://api_key:eyJrIjoiWldudUxQQ2ZwbXVKcVZzQzdpdUhRSnJVaktQWUVyNVUiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9@monitoring-service/v0/qan/GetReport
    logger.go:42: 10:44:36 | monitoring/4-check-metrics | + kubectl -n kuttl-test-precious-dragon exec mysql-client -- bash -c 'curl -s -k -XPOST -d '\''{ "columns":[ "load", "num_queries", "query_time" ], "first_seen": false, "group_by": "queryid", "include_only_fields": [], "keyword": "", "labels": [ { "key": "cluster", "value": ["monitoring"] }], "limit": 10, "offset": 0, "order_by": "-load", "main_metric": "load", "period_start_from": "2024-08-06T22:44:36+00:00", "period_start_to": "2024-08-07T10:44:36+00:00" }'\'' https://api_key:eyJrIjoiWldudUxQQ2ZwbXVKcVZzQzdpdUhRSnJVaktQWUVyNVUiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9@monitoring-service/v0/qan/GetReport'
    logger.go:42: 10:44:38 | monitoring/4-check-metrics | "TOTAL"
    logger.go:42: 10:44:38 | monitoring/4-check-metrics | "REPLACE INTO `sys_operator` . `heartbeat` ( `ts` , `server_id` , FILE , `position` , `relay_master_log_file` , `exec_master_log_pos` ) VALUES (...)"
    logger.go:42: 10:44:38 | monitoring/4-check-metrics | "SELECT `EVENT_NAME` , `COUNT_STAR` , `SUM_TIMER_WAIT` FROM `performance_schema` . `events_waits_summary_global_by_event_name`"
    logger.go:42: 10:44:38 | monitoring/4-check-metrics | "SHOW GLOBAL STATUS"
    logger.go:42: 10:44:38 | monitoring/4-check-metrics | "SHOW GLOBAL VARIABLES LIKE ?"
    logger.go:42: 10:44:38 | monitoring/4-check-metrics | "SELECT COLUMN_NAME FROM `information_schema` . `columns` WHERE `table_schema` = ? AND TABLE_NAME = ? AND COLUMN_NAME IN (...) LIMIT ?"
    logger.go:42: 10:44:38 | monitoring/4-check-metrics | "SHOW GLOBAL STATUS LIKE ?"
    logger.go:42: 10:44:38 | monitoring/4-check-metrics | "SELECT NAME , `subsystem` , TYPE , COMMENT , `count` FROM `information_schema` . `innodb_metrics` WHERE `status` = ?"
    logger.go:42: 10:44:38 | monitoring/4-check-metrics | "SELECT `t` . `table_schema` , `t` . `table_name` , COLUMN_NAME , AUTO_INCREMENT , `pow` ( ? , CASE `data_type` WHEN ? THEN ? WHEN ? THEN ? WHEN ? THEN ? WHEN ? THEN ? WHEN ? THEN ? END + ( `column_type` LIKE ? ) ) - ? AS `max_int` FROM `information_schema` . `columns` `c` STRAIGHT_JOIN `information_schema` . `tables` `t` ON BINARY `t` . `table_schema` = `c` . `table_schema` AND BINARY `t` . `table_name` = `c` . `table_name` WHERE `c` . `extra` = ? AND `t` . `auto_increment` IS NOT NULL"
    logger.go:42: 10:44:38 | monitoring/4-check-metrics | "SELECT `conn_status` . `channel_name` AS `channel_name` , `conn_status` . `service_state` AS RELAY_THREAD , `applier_status` . `service_state` AS SQL_THREAD , `LAST_APPLIED_TRANSACTION_END_APPLY_TIMESTAMP` - `LAST_APPLIED_TRANSACTION_ORIGINAL_COMMIT_TIMESTAMP` ? , `LAST_QUEUED_TRANSACTION_START_QUEUE_TIMESTAMP` - `LAST_QUEUED_TRANSACTION_ORIGINAL_COMMIT_TIMESTAMP` ? , `LAST_QUEUED_TRANSACTION_END_QUEUE_TIMESTAMP` - `LAST_QUEUED_TRANSACTION_START_QUEUE_TIMESTAMP` ? , `LAST_APPLIED_TRANSACTION_END_APPLY_TIMESTAMP` - `LAST_APPLIED_TRANSACTION_START_APPLY_TIMESTAMP` ? , IF ( `GTID_SUBTRACT` ( `LAST_QUEUED_TRANSACTION` , `LAST_APPLIED_TRANSACTION` ) = ?, ... , `abs` ( `time_to_sec` ( IF ( `time_to_sec` ( `APPLYING_TRANSACTION_ORIGINAL_COMMIT_TIMESTAMP` ) = ?, ... , `timediff` ( `APPLYING_TRANSACTION_ORIGINAL_COMMIT_TIMESTAMP` , NOW ( ) ) ) ) ) ) `lag_in_seconds` FROM `performance_schema` . `replication_connection_status` AS `conn_status` JOIN `performance_schema` . `replication_applier_status_by_worker` AS `applier_status` ON"
    logger.go:42: 10:44:38 | monitoring/4-check-metrics | "SELECT `substring_index` ( HOST , ?, ... ) AS `slave_hostname` FROM `information_schema` . `processlist` WHERE `command` IN (...)"
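QAN coverage is verified through PMM's /v0/qan/GetReport endpoint: the JSON payload above selects load, num_queries, and query_time grouped by queryid, filters on the cluster label, and spans a 12-hour window, and the query fingerprints printed back confirm that per-query analytics reached PMM. Since monitoring-service resolves only inside the cluster, the test tunnels the call through kubectl exec on the mysql-client pod. A condensed equivalent, assuming the payload fields omitted here can keep their defaults:

    # top queries by load for the "monitoring" cluster over the last 12 hours
    payload='{"group_by":"queryid","labels":[{"key":"cluster","value":["monitoring"]}],"limit":10,"order_by":"-load","main_metric":"load","period_start_from":"'$(/usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z)'","period_start_to":"'$(/usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z)'"}'
    kubectl -n "${NAMESPACE}" exec mysql-client -- \
        curl -s -k -XPOST -d "${payload}" \
        "https://api_key:${API_KEY}@monitoring-service/v0/qan/GetReport" \
        | jq '.rows[].fingerprint'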
    logger.go:42: 10:44:38 | monitoring/4-check-metrics | ++ get_service_ip monitoring-haproxy
    logger.go:42: 10:44:38 | monitoring/4-check-metrics | ++ local service=monitoring-haproxy
    logger.go:42: 10:44:38 | monitoring/4-check-metrics | ++ kubectl get service/monitoring-haproxy -n kuttl-test-precious-dragon -o 'jsonpath={.spec.type}'
    logger.go:42: 10:44:38 | monitoring/4-check-metrics | ++ grep -q NotFound
    logger.go:42: 10:44:39 | monitoring/4-check-metrics | +++ kubectl get service/monitoring-haproxy -n kuttl-test-precious-dragon -o 'jsonpath={.spec.type}'
    logger.go:42: 10:44:39 | monitoring/4-check-metrics | ++ '[' LoadBalancer = ClusterIP ']'
    logger.go:42: 10:44:39 | monitoring/4-check-metrics | ++ egrep -q 'hostname|ip'
    logger.go:42: 10:44:39 | monitoring/4-check-metrics | ++ kubectl get service/monitoring-haproxy -n kuttl-test-precious-dragon -o 'jsonpath={.status.loadBalancer.ingress[]}'
    logger.go:42: 10:44:40 | monitoring/4-check-metrics | ++ kubectl get service/monitoring-haproxy -n kuttl-test-precious-dragon -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
    logger.go:42: 10:44:40 | monitoring/4-check-metrics | ++ kubectl get service/monitoring-haproxy -n kuttl-test-precious-dragon -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
    logger.go:42: 10:44:40 | monitoring/4-check-metrics | + haproxy_svc=35.222.176.94
    logger.go:42: 10:44:40 | monitoring/4-check-metrics | ++ curl -s -o /dev/null -w '%{http_code}' http://35.222.176.94:8404/metrics
    logger.go:42: 10:44:41 | monitoring/4-check-metrics | + http_code=200
    logger.go:42: 10:44:41 | monitoring/4-check-metrics | + [[ 200 != 200 ]]
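The step's final assertion is intentionally thin: HAProxy serves its metrics at /metrics on port 8404, and the test only checks for an HTTP 200. When debugging a failure here, it can help to look at a few of the series the test never inspects (the head call is an addition for illustration, not part of the test):

    # peek at the first few HAProxy metrics instead of just the status code
    curl -s "http://${haproxy_svc}:8404/metrics" | head -n 5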
    logger.go:42: 10:44:41 | monitoring/4-check-metrics | test step completed 4-check-metrics
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | starting test step 5-check-password-leak
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | running command: [sh -c set -o errexit
        set -o xtrace
        
        source ../../functions
        
        check_passwords_leak]
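check_passwords_leak, traced below, first builds the list of candidate secret values (skipping TLS material via the jq filter on .crt/.key/.pub/.pem/.p12 keys), then walks every pod and container in the namespace saving logs under TEMP_DIR. The decode-and-grep step that would actually flag a leak is not visible in this part of the trace, so the second half of this sketch is an assumed reconstruction:

    # values selected by the jq filter are base64; decode them into candidates
    passwords=$(for v in ${secrets}; do echo "${v}" | base64 --decode; echo; done)
    # assumed final check: fail if any decoded password appears in a saved log
    for p in ${passwords}; do
        grep -F -- "${p}" /tmp/kuttl/ps/monitoring/logs_output-*.txt && exit 1
    done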
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | + source ../../functions
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ realpath ../../..
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-428
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | ++++ pwd
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/tests/monitoring
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | ++ test_name=monitoring
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/vars.sh
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-428
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-428
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/deploy
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/deploy
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/conf
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/conf
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ export GIT_BRANCH=PR-428
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ GIT_BRANCH=PR-428
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ export VERSION=PR-428-6555938f
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ VERSION=PR-428-6555938f
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-428-6555938f
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-428-6555938f
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ export PMM_SERVER_VERSION=9.9.9
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ PMM_SERVER_VERSION=9.9.9
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ export CERT_MANAGER_VER=1.15.1
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ CERT_MANAGER_VER=1.15.1
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | ++++ which gdate
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-428/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | ++++ which date
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ date=/usr/bin/date
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ command -v oc
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ kubectl get nodes
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | +++ grep '^minikube'
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | + check_passwords_leak
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | + local secrets
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | + local passwords
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | + local pods
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | ++ kubectl get secrets -o json
    logger.go:42: 10:44:41 | monitoring/5-check-password-leak | ++ jq -r '.items[].data | to_entries | .[] | select(.key | (endswith(".crt") or endswith(".key") or endswith(".pub") or endswith(".pem") or endswith(".p12") or test("namespace")) | not) | .value'
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | + secrets=
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | + passwords=' '
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-precious-dragon get pods -o name
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | ++ awk -F / '{print $2}'
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | + pods='monitoring-0
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | monitoring-haproxy-0
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | monitoring-haproxy-1
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | monitoring-haproxy-2
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | monitoring-mysql-0
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | monitoring-mysql-1
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | monitoring-mysql-2
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | monitoring-orc-0
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | monitoring-orc-1
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | monitoring-orc-2
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | mysql-client
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | percona-server-mysql-operator-547b9d576d-75gml'
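The collect_logs loop that produces the output below is easy to reconstruct from its own trace; everything here comes from the xtrace lines except the redirection into the per-container file, which xtrace does not show:

    # save logs for every container of every pod in the namespace
    for p in ${pods}; do
        containers=$(kubectl -n "${NS}" get pod "${p}" -o 'jsonpath={.spec.containers[*].name}')
        for c in ${containers}; do
            kubectl -n "${NS}" logs "${p}" -c "${c}" \
                > "/tmp/kuttl/ps/monitoring/logs_output-${p}-${c}.txt"
            echo "logs saved in: /tmp/kuttl/ps/monitoring/logs_output-${p}-${c}.txt"
        done
    done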
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | + collect_logs kuttl-test-precious-dragon
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | + local containers
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | + local count
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | + NS=kuttl-test-precious-dragon
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | + for p in '$pods'
    logger.go:42: 10:44:42 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-precious-dragon get pod monitoring-0 -o 'jsonpath={.spec.containers[*].name}'
    logger.go:42: 10:44:43 | monitoring/5-check-password-leak | + local containers=monitoring
    logger.go:42: 10:44:43 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:43 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-0 -c monitoring
    logger.go:42: 10:44:43 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-0-monitoring.txt
    logger.go:42: 10:44:43 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-0-monitoring.txt
    logger.go:42: 10:44:43 | monitoring/5-check-password-leak | + echo
    logger.go:42: 10:44:43 | monitoring/5-check-password-leak | 
    logger.go:42: 10:44:43 | monitoring/5-check-password-leak | + for p in '$pods'
    logger.go:42: 10:44:43 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-precious-dragon get pod monitoring-haproxy-0 -o 'jsonpath={.spec.containers[*].name}'
    logger.go:42: 10:44:44 | monitoring/5-check-password-leak | + local 'containers=haproxy mysql-monit pmm-client'
    logger.go:42: 10:44:44 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:44 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-haproxy-0 -c haproxy
    logger.go:42: 10:44:44 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-haproxy.txt
    logger.go:42: 10:44:44 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-haproxy.txt
    logger.go:42: 10:44:44 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:44 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-haproxy-0 -c mysql-monit
    logger.go:42: 10:44:45 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-mysql-monit.txt
    logger.go:42: 10:44:45 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-mysql-monit.txt
    logger.go:42: 10:44:45 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:45 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-haproxy-0 -c pmm-client
    logger.go:42: 10:44:46 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-pmm-client.txt
    logger.go:42: 10:44:46 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-pmm-client.txt
    logger.go:42: 10:44:46 | monitoring/5-check-password-leak | + echo
    logger.go:42: 10:44:46 | monitoring/5-check-password-leak | 
    logger.go:42: 10:44:46 | monitoring/5-check-password-leak | + for p in '$pods'
    logger.go:42: 10:44:46 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-precious-dragon get pod monitoring-haproxy-1 -o 'jsonpath={.spec.containers[*].name}'
    logger.go:42: 10:44:46 | monitoring/5-check-password-leak | + local 'containers=haproxy mysql-monit pmm-client'
    logger.go:42: 10:44:46 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:46 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-haproxy-1 -c haproxy
    logger.go:42: 10:44:47 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-haproxy.txt
    logger.go:42: 10:44:47 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-haproxy.txt
    logger.go:42: 10:44:47 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:47 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-haproxy-1 -c mysql-monit
    logger.go:42: 10:44:48 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-mysql-monit.txt
    logger.go:42: 10:44:48 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-mysql-monit.txt
    logger.go:42: 10:44:48 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:48 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-haproxy-1 -c pmm-client
    logger.go:42: 10:44:48 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-pmm-client.txt
    logger.go:42: 10:44:48 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-pmm-client.txt
    logger.go:42: 10:44:48 | monitoring/5-check-password-leak | + echo
    logger.go:42: 10:44:48 | monitoring/5-check-password-leak | 
    logger.go:42: 10:44:48 | monitoring/5-check-password-leak | + for p in '$pods'
    logger.go:42: 10:44:48 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-precious-dragon get pod monitoring-haproxy-2 -o 'jsonpath={.spec.containers[*].name}'
    logger.go:42: 10:44:49 | monitoring/5-check-password-leak | + local 'containers=haproxy mysql-monit pmm-client'
    logger.go:42: 10:44:49 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:49 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-haproxy-2 -c haproxy
    logger.go:42: 10:44:50 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-haproxy.txt
    logger.go:42: 10:44:50 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-haproxy.txt
    logger.go:42: 10:44:50 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:50 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-haproxy-2 -c mysql-monit
    logger.go:42: 10:44:50 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-mysql-monit.txt
    logger.go:42: 10:44:50 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-mysql-monit.txt
    logger.go:42: 10:44:50 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:50 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-haproxy-2 -c pmm-client
    logger.go:42: 10:44:51 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-pmm-client.txt
    logger.go:42: 10:44:51 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-pmm-client.txt
    logger.go:42: 10:44:51 | monitoring/5-check-password-leak | + echo
    logger.go:42: 10:44:51 | monitoring/5-check-password-leak | 
    logger.go:42: 10:44:51 | monitoring/5-check-password-leak | + for p in '$pods'
    logger.go:42: 10:44:51 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-precious-dragon get pod monitoring-mysql-0 -o 'jsonpath={.spec.containers[*].name}'
    logger.go:42: 10:44:52 | monitoring/5-check-password-leak | + local 'containers=mysql xtrabackup pt-heartbeat pmm-client'
    logger.go:42: 10:44:52 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:52 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-mysql-0 -c mysql
    logger.go:42: 10:44:52 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-mysql.txt
    logger.go:42: 10:44:52 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-mysql.txt
    logger.go:42: 10:44:52 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:52 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-mysql-0 -c xtrabackup
    logger.go:42: 10:44:53 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-xtrabackup.txt
    logger.go:42: 10:44:53 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-xtrabackup.txt
    logger.go:42: 10:44:53 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:53 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-mysql-0 -c pt-heartbeat
    logger.go:42: 10:44:53 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-pt-heartbeat.txt
    logger.go:42: 10:44:53 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-pt-heartbeat.txt
    logger.go:42: 10:44:53 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:53 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-mysql-0 -c pmm-client
    logger.go:42: 10:44:54 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-pmm-client.txt
    logger.go:42: 10:44:54 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-pmm-client.txt
    logger.go:42: 10:44:54 | monitoring/5-check-password-leak | + echo
    logger.go:42: 10:44:54 | monitoring/5-check-password-leak | 
    logger.go:42: 10:44:54 | monitoring/5-check-password-leak | + for p in '$pods'
    logger.go:42: 10:44:54 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-precious-dragon get pod monitoring-mysql-1 -o 'jsonpath={.spec.containers[*].name}'
    logger.go:42: 10:44:55 | monitoring/5-check-password-leak | + local 'containers=mysql xtrabackup pt-heartbeat pmm-client'
    logger.go:42: 10:44:55 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:55 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-mysql-1 -c mysql
    logger.go:42: 10:44:55 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-mysql.txt
    logger.go:42: 10:44:55 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-mysql.txt
    logger.go:42: 10:44:55 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:55 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-mysql-1 -c xtrabackup
    logger.go:42: 10:44:56 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-xtrabackup.txt
    logger.go:42: 10:44:56 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-xtrabackup.txt
    logger.go:42: 10:44:56 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:56 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-mysql-1 -c pt-heartbeat
    logger.go:42: 10:44:56 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-pt-heartbeat.txt
    logger.go:42: 10:44:56 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-pt-heartbeat.txt
    logger.go:42: 10:44:56 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:56 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-mysql-1 -c pmm-client
    logger.go:42: 10:44:57 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-pmm-client.txt
    logger.go:42: 10:44:57 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-pmm-client.txt
    logger.go:42: 10:44:57 | monitoring/5-check-password-leak | + echo
    logger.go:42: 10:44:57 | monitoring/5-check-password-leak | 
    logger.go:42: 10:44:57 | monitoring/5-check-password-leak | + for p in '$pods'
    logger.go:42: 10:44:57 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-precious-dragon get pod monitoring-mysql-2 -o 'jsonpath={.spec.containers[*].name}'
    logger.go:42: 10:44:58 | monitoring/5-check-password-leak | + local 'containers=mysql xtrabackup pt-heartbeat pmm-client'
    logger.go:42: 10:44:58 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:58 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-mysql-2 -c mysql
    logger.go:42: 10:44:58 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-mysql.txt
    logger.go:42: 10:44:58 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-mysql.txt
    logger.go:42: 10:44:58 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:58 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-mysql-2 -c xtrabackup
    logger.go:42: 10:44:59 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-xtrabackup.txt
    logger.go:42: 10:44:59 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-xtrabackup.txt
    logger.go:42: 10:44:59 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:59 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-mysql-2 -c pt-heartbeat
    logger.go:42: 10:44:59 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-pt-heartbeat.txt
    logger.go:42: 10:44:59 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-pt-heartbeat.txt
    logger.go:42: 10:44:59 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:44:59 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-mysql-2 -c pmm-client
    logger.go:42: 10:45:00 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-pmm-client.txt
    logger.go:42: 10:45:00 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-pmm-client.txt
    logger.go:42: 10:45:00 | monitoring/5-check-password-leak | + echo
    logger.go:42: 10:45:00 | monitoring/5-check-password-leak | 
    logger.go:42: 10:45:00 | monitoring/5-check-password-leak | + for p in '$pods'
    logger.go:42: 10:45:00 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-precious-dragon get pod monitoring-orc-0 -o 'jsonpath={.spec.containers[*].name}'
    logger.go:42: 10:45:00 | monitoring/5-check-password-leak | + local 'containers=orc mysql-monit'
    logger.go:42: 10:45:00 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:45:00 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-orc-0 -c orc
    logger.go:42: 10:45:01 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-0-orc.txt
    logger.go:42: 10:45:01 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-0-orc.txt
    logger.go:42: 10:45:01 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:45:01 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-orc-0 -c mysql-monit
    logger.go:42: 10:45:02 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-0-mysql-monit.txt
    logger.go:42: 10:45:02 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-0-mysql-monit.txt
    logger.go:42: 10:45:02 | monitoring/5-check-password-leak | + echo
    logger.go:42: 10:45:02 | monitoring/5-check-password-leak | 
    logger.go:42: 10:45:02 | monitoring/5-check-password-leak | + for p in '$pods'
    logger.go:42: 10:45:02 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-precious-dragon get pod monitoring-orc-1 -o 'jsonpath={.spec.containers[*].name}'
    logger.go:42: 10:45:02 | monitoring/5-check-password-leak | + local 'containers=orc mysql-monit'
    logger.go:42: 10:45:02 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:45:02 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-orc-1 -c orc
    logger.go:42: 10:45:03 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-1-orc.txt
    logger.go:42: 10:45:03 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-1-orc.txt
    logger.go:42: 10:45:03 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:45:03 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-orc-1 -c mysql-monit
    logger.go:42: 10:45:04 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-1-mysql-monit.txt
    logger.go:42: 10:45:04 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-1-mysql-monit.txt
    logger.go:42: 10:45:04 | monitoring/5-check-password-leak | + echo
    logger.go:42: 10:45:04 | monitoring/5-check-password-leak | 
    logger.go:42: 10:45:04 | monitoring/5-check-password-leak | + for p in '$pods'
    logger.go:42: 10:45:04 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-precious-dragon get pod monitoring-orc-2 -o 'jsonpath={.spec.containers[*].name}'
    logger.go:42: 10:45:04 | monitoring/5-check-password-leak | + local 'containers=orc mysql-monit'
    logger.go:42: 10:45:04 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:45:04 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-orc-2 -c orc
    logger.go:42: 10:45:05 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-2-orc.txt
    logger.go:42: 10:45:05 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-2-orc.txt
    logger.go:42: 10:45:05 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:45:05 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs monitoring-orc-2 -c mysql-monit
    logger.go:42: 10:45:06 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-2-mysql-monit.txt
    logger.go:42: 10:45:06 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-2-mysql-monit.txt
    logger.go:42: 10:45:06 | monitoring/5-check-password-leak | + echo
    logger.go:42: 10:45:06 | monitoring/5-check-password-leak | 
    logger.go:42: 10:45:06 | monitoring/5-check-password-leak | + for p in '$pods'
    logger.go:42: 10:45:06 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-precious-dragon get pod mysql-client -o 'jsonpath={.spec.containers[*].name}'
    logger.go:42: 10:45:06 | monitoring/5-check-password-leak | + local containers=mysql-client
    logger.go:42: 10:45:06 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:45:06 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs mysql-client -c mysql-client
    logger.go:42: 10:45:07 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-mysql-client-mysql-client.txt
    logger.go:42: 10:45:07 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-mysql-client-mysql-client.txt
    logger.go:42: 10:45:07 | monitoring/5-check-password-leak | + echo
    logger.go:42: 10:45:07 | monitoring/5-check-password-leak | 
    logger.go:42: 10:45:07 | monitoring/5-check-password-leak | + for p in '$pods'
    logger.go:42: 10:45:07 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-precious-dragon get pod percona-server-mysql-operator-547b9d576d-75gml -o 'jsonpath={.spec.containers[*].name}'
    logger.go:42: 10:45:07 | monitoring/5-check-password-leak | + local containers=manager
    logger.go:42: 10:45:07 | monitoring/5-check-password-leak | + for c in '$containers'
    logger.go:42: 10:45:07 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-precious-dragon logs percona-server-mysql-operator-547b9d576d-75gml -c manager
    logger.go:42: 10:45:08 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-percona-server-mysql-operator-547b9d576d-75gml-manager.txt
    logger.go:42: 10:45:08 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-percona-server-mysql-operator-547b9d576d-75gml-manager.txt
    logger.go:42: 10:45:08 | monitoring/5-check-password-leak | + echo
    logger.go:42: 10:45:08 | monitoring/5-check-password-leak | 
    logger.go:42: 10:45:08 | monitoring/5-check-password-leak | + '[' -n '' ']'
    logger.go:42: 10:45:08 | monitoring/5-check-password-leak | test step completed 5-check-password-leak
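    [editor's note] For reference, the loop driving the xtrace above (sourced from e2e-tests/functions) can be reconstructed roughly as follows. This is a minimal sketch based only on the visible trace; the helper name save_pod_logs and the final grep-based leak check are assumptions, since this excerpt shows only the log-collection half of 5-check-password-leak:

        # Sketch (assumed helper name): dump every container's logs per pod,
        # then scan the saved files for leaked passwords.
        save_pod_logs() {
            local pods containers p c
            pods=$(kubectl -n "$NAMESPACE" get pods -o 'jsonpath={.items[*].metadata.name}')
            for p in $pods; do
                containers=$(kubectl -n "$NAMESPACE" get pod "$p" \
                    -o 'jsonpath={.spec.containers[*].name}')
                for c in $containers; do
                    kubectl -n "$NAMESPACE" logs "$p" -c "$c" \
                        >"$TEMP_DIR/logs_output-$p-$c.txt"
                    echo "logs saved in: $TEMP_DIR/logs_output-$p-$c.txt"
                done
                echo
            done
            # Assumed follow-up: grep each saved file for the known secret
            # values and fail the step on any match (that check is not
            # visible in the trace above).
        }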
    logger.go:42: 10:45:08 | monitoring/98-drop-finalizer | starting test step 98-drop-finalizer
    logger.go:42: 10:45:09 | monitoring/98-drop-finalizer | PerconaServerMySQL:kuttl-test-precious-dragon/monitoring updated
    logger.go:42: 10:45:09 | monitoring/98-drop-finalizer | test step completed 98-drop-finalizer
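    [editor's note] The 98-drop-finalizer step only reports that the PerconaServerMySQL custom resource was updated; its manifest is not shown. A hypothetical manual equivalent would clear the resource's finalizers so the cleanup steps that follow are not blocked (the resource kind is taken from the log line above; the patch form is an assumption):

        # Hypothetical equivalent of 98-drop-finalizer: remove all finalizers
        # from the custom resource so deletion can proceed.
        kubectl -n kuttl-test-precious-dragon patch perconaservermysql monitoring \
            --type=merge -p '{"metadata":{"finalizers":[]}}'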
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | starting test step 99-remove-cluster-gracefully
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | running command: [sh -c set -o errexit
        set -o xtrace
        
        source ../../functions
        
        destroy_operator]
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | + source ../../functions
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ realpath ../../..
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-428
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | ++++ pwd
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/tests/monitoring
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | ++ test_name=monitoring
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/vars.sh
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-428
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-428
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/deploy
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/deploy
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/conf
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-428/e2e-tests/conf
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ export GIT_BRANCH=PR-428
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ GIT_BRANCH=PR-428
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ export VERSION=PR-428-6555938f
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ VERSION=PR-428-6555938f
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-428-6555938f
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-428-6555938f
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ export PMM_SERVER_VERSION=9.9.9
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ PMM_SERVER_VERSION=9.9.9
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ export CERT_MANAGER_VER=1.15.1
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ CERT_MANAGER_VER=1.15.1
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | ++++ which gdate
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-428/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | ++++ which date
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ date=/usr/bin/date
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ command -v oc
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ kubectl get nodes
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | +++ grep '^minikube'
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | + destroy_operator
    logger.go:42: 10:45:09 | monitoring/99-remove-cluster-gracefully | + kubectl -n kuttl-test-precious-dragon delete deployment percona-server-mysql-operator --force --grace-period=0
    logger.go:42: 10:45:10 | monitoring/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
    logger.go:42: 10:45:10 | monitoring/99-remove-cluster-gracefully | deployment.apps "percona-server-mysql-operator" force deleted
    logger.go:42: 10:45:10 | monitoring/99-remove-cluster-gracefully | + [[ -n '' ]]
    logger.go:42: 10:45:10 | monitoring/99-remove-cluster-gracefully | test step completed 99-remove-cluster-gracefully
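    [editor's note] Based on the trace above, destroy_operator reduces to a force delete of the operator Deployment plus a guarded extra cleanup that is skipped in this run because the guarding variable is empty. A minimal sketch, assuming the guard is an operator-namespace variable (its actual name is not visible in this excerpt):

        destroy_operator() {
            # Matches the xtrace: force-delete the operator Deployment without
            # waiting for graceful termination (hence the kubectl warning above).
            kubectl -n "$NAMESPACE" delete deployment percona-server-mysql-operator \
                --force --grace-period=0
            # The trace's `+ [[ -n '' ]]` indicates an optional step that only
            # runs when this (assumed) variable is non-empty.
            if [[ -n "${OPERATOR_NS:-}" ]]; then
                kubectl delete namespace "$OPERATOR_NS"
            fi
        }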
    logger.go:42: 10:45:11 | monitoring | monitoring events from ns kuttl-test-precious-dragon:
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:31:57 +0000 UTC	Normal	Pod percona-server-mysql-operator-547b9d576d-75gml		Scheduled	Successfully assigned kuttl-test-precious-dragon/percona-server-mysql-operator-547b9d576d-75gml to gke-jen-ps-428-6555938f--default-pool-39e5cfaa-qgt6	default-scheduler	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:31:57 +0000 UTC	Normal	ReplicaSet.apps percona-server-mysql-operator-547b9d576d		SuccessfulCreate	Created pod: percona-server-mysql-operator-547b9d576d-75gml	replicaset-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:31:57 +0000 UTC	Normal	Deployment.apps percona-server-mysql-operator		ScalingReplicaSet	Scaled up replica set percona-server-mysql-operator-547b9d576d to 1	deployment-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:31:58 +0000 UTC	Normal	Pod percona-server-mysql-operator-547b9d576d-75gml.spec.containers{manager}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-428-6555938f"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:31:58 +0000 UTC	Normal	Pod percona-server-mysql-operator-547b9d576d-75gml.spec.containers{manager}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-428-6555938f" in 98ms (98ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:31:58 +0000 UTC	Normal	Pod percona-server-mysql-operator-547b9d576d-75gml.spec.containers{manager}		Created	Created container manager	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:31:58 +0000 UTC	Normal	Pod percona-server-mysql-operator-547b9d576d-75gml.spec.containers{manager}		Started	Started container manager	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:31:59 +0000 UTC	Normal	Lease.coordination.k8s.io 08db2feb.percona.com		LeaderElection	percona-server-mysql-operator-547b9d576d-75gml_78202100-57c4-4000-92d0-688699195629 became leader	percona-server-mysql-operator-547b9d576d-75gml_78202100-57c4-4000-92d0-688699195629	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:32:00 +0000 UTC	Normal	Pod mysql-client		Scheduled	Successfully assigned kuttl-test-precious-dragon/mysql-client to gke-jen-ps-428-6555938f--default-pool-39e5cfaa-qgt6	default-scheduler	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:32:00 +0000 UTC	Normal	Pod mysql-client.spec.containers{mysql-client}		Pulled	Container image "percona/percona-server:8.0.33" already present on machine	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:32:00 +0000 UTC	Normal	Pod mysql-client.spec.containers{mysql-client}		Created	Created container mysql-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:32:00 +0000 UTC	Normal	Pod mysql-client.spec.containers{mysql-client}		Started	Started container mysql-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:32:13 +0000 UTC	Normal	StatefulSet.apps monitoring		SuccessfulCreate	create Claim pmmdata-monitoring-0 Pod monitoring-0 in StatefulSet monitoring success	statefulset-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:32:13 +0000 UTC	Normal	StatefulSet.apps monitoring		SuccessfulCreate	create Pod monitoring-0 in StatefulSet monitoring successful	statefulset-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:32:13 +0000 UTC	Normal	PersistentVolumeClaim pmmdata-monitoring-0		WaitForFirstConsumer	waiting for first consumer to be created before binding	persistentvolume-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:32:14 +0000 UTC	Normal	Service monitoring-service		EnsuringLoadBalancer	Ensuring load balancer	service-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:32:14 +0000 UTC	Normal	PersistentVolumeClaim pmmdata-monitoring-0		Provisioning	External provisioner is provisioning volume for claim "kuttl-test-precious-dragon/pmmdata-monitoring-0"	pd.csi.storage.gke.io_gke-fc9cc93e46f343588bb4-40f6-88d6-vm_778e273e-4616-461d-86ca-0c520e1f86f3	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:32:14 +0000 UTC	Normal	PersistentVolumeClaim pmmdata-monitoring-0		ExternalProvisioning	Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered.	persistentvolume-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:32:18 +0000 UTC	Normal	PersistentVolumeClaim pmmdata-monitoring-0		ProvisioningSucceeded	Successfully provisioned volume pvc-21024787-d3cb-445e-a1cd-394860b0395f	pd.csi.storage.gke.io_gke-fc9cc93e46f343588bb4-40f6-88d6-vm_778e273e-4616-461d-86ca-0c520e1f86f3	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:32:19 +0000 UTC	Normal	Pod monitoring-0		Scheduled	Successfully assigned kuttl-test-precious-dragon/monitoring-0 to gke-jen-ps-428-6555938f--default-pool-39e5cfaa-qgt6	default-scheduler	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:32:26 +0000 UTC	Normal	Pod monitoring-0		SuccessfulAttachVolume	AttachVolume.Attach succeeded for volume "pvc-21024787-d3cb-445e-a1cd-394860b0395f" 	attachdetach-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:32:29 +0000 UTC	Normal	Pod monitoring-0.spec.containers{monitoring}		Pulling	Pulling image "perconalab/pmm-server:dev-latest"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:32:51 +0000 UTC	Normal	Service monitoring-service		EnsuredLoadBalancer	Ensured load balancer	service-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:33:12 +0000 UTC	Normal	Pod monitoring-0.spec.containers{monitoring}		Pulled	Successfully pulled image "perconalab/pmm-server:dev-latest" in 43.339s (43.339s including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:33:12 +0000 UTC	Normal	Pod monitoring-0.spec.containers{monitoring}		Created	Created container monitoring	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:33:12 +0000 UTC	Normal	Pod monitoring-0.spec.containers{monitoring}		Started	Started container monitoring	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:33:56 +0000 UTC	Normal	Service monitoring-haproxy		EnsuringLoadBalancer	Ensuring load balancer	service-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:33:57 +0000 UTC	Normal	PersistentVolumeClaim datadir-monitoring-mysql-0		WaitForFirstConsumer	waiting for first consumer to be created before binding	persistentvolume-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:33:57 +0000 UTC	Normal	PersistentVolumeClaim datadir-monitoring-mysql-0		ExternalProvisioning	Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered.	persistentvolume-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:33:57 +0000 UTC	Normal	PersistentVolumeClaim datadir-monitoring-mysql-0		Provisioning	External provisioner is provisioning volume for claim "kuttl-test-precious-dragon/datadir-monitoring-mysql-0"	pd.csi.storage.gke.io_gke-fc9cc93e46f343588bb4-40f6-88d6-vm_778e273e-4616-461d-86ca-0c520e1f86f3	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:33:57 +0000 UTC	Normal	StatefulSet.apps monitoring-mysql		SuccessfulCreate	create Claim datadir-monitoring-mysql-0 Pod monitoring-mysql-0 in StatefulSet monitoring-mysql success	statefulset-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:33:57 +0000 UTC	Normal	StatefulSet.apps monitoring-mysql		SuccessfulCreate	create Pod monitoring-mysql-0 in StatefulSet monitoring-mysql successful	statefulset-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:33:57 +0000 UTC	Normal	Pod monitoring-orc-0		Scheduled	Successfully assigned kuttl-test-precious-dragon/monitoring-orc-0 to gke-jen-ps-428-6555938f--default-pool-39e5cfaa-1t0z	default-scheduler	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:33:57 +0000 UTC	Normal	StatefulSet.apps monitoring-orc		SuccessfulCreate	create Pod monitoring-orc-0 in StatefulSet monitoring-orc successful	statefulset-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:33:58 +0000 UTC	Normal	Pod monitoring-orc-0.spec.initContainers{orc-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-428-6555938f"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:33:58 +0000 UTC	Normal	Pod monitoring-orc-0.spec.initContainers{orc-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-428-6555938f" in 108ms (108ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:33:58 +0000 UTC	Normal	Pod monitoring-orc-0.spec.initContainers{orc-init}		Created	Created container orc-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:33:58 +0000 UTC	Normal	Pod monitoring-orc-0.spec.initContainers{orc-init}		Started	Started container orc-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:00 +0000 UTC	Normal	Pod monitoring-orc-0.spec.containers{orc}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:00 +0000 UTC	Normal	Pod monitoring-orc-0.spec.containers{orc}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 97ms (97ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:00 +0000 UTC	Normal	Pod monitoring-orc-0.spec.containers{orc}		Created	Created container orc	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:00 +0000 UTC	Normal	Pod monitoring-orc-0.spec.containers{orc}		Started	Started container orc	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:00 +0000 UTC	Normal	Pod monitoring-orc-0.spec.containers{mysql-monit}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:00 +0000 UTC	Normal	Pod monitoring-orc-0.spec.containers{mysql-monit}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 90ms (90ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:00 +0000 UTC	Normal	Pod monitoring-orc-0.spec.containers{mysql-monit}		Created	Created container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:00 +0000 UTC	Normal	Pod monitoring-orc-0.spec.containers{mysql-monit}		Started	Started container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:01 +0000 UTC	Normal	PersistentVolumeClaim datadir-monitoring-mysql-0		ProvisioningSucceeded	Successfully provisioned volume pvc-2776c152-50f4-4ec4-bd0b-23c3193f2b6d	pd.csi.storage.gke.io_gke-fc9cc93e46f343588bb4-40f6-88d6-vm_778e273e-4616-461d-86ca-0c520e1f86f3	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:01 +0000 UTC	Normal	Pod monitoring-mysql-0		Scheduled	Successfully assigned kuttl-test-precious-dragon/monitoring-mysql-0 to gke-jen-ps-428-6555938f--default-pool-39e5cfaa-sz41	default-scheduler	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:08 +0000 UTC	Normal	Pod monitoring-mysql-0		SuccessfulAttachVolume	AttachVolume.Attach succeeded for volume "pvc-2776c152-50f4-4ec4-bd0b-23c3193f2b6d" 	attachdetach-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:09 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.initContainers{mysql-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-428-6555938f"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:09 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.initContainers{mysql-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-428-6555938f" in 107ms (107ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:09 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.initContainers{mysql-init}		Created	Created container mysql-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:10 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.initContainers{mysql-init}		Started	Started container mysql-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:11 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{mysql}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-psmysql"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:11 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{mysql}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 92ms (92ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:11 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{mysql}		Created	Created container mysql	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:11 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{mysql}		Started	Started container mysql	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:11 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{xtrabackup}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-backup"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:11 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{xtrabackup}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 104ms (104ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:11 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{xtrabackup}		Created	Created container xtrabackup	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:11 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{xtrabackup}		Started	Started container xtrabackup	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:11 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{pt-heartbeat}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-toolkit"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:11 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{pt-heartbeat}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 99ms (99ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:11 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{pt-heartbeat}		Created	Created container pt-heartbeat	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:11 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{pt-heartbeat}		Started	Started container pt-heartbeat	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:11 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{pmm-client}		Pulling	Pulling image "perconalab/pmm-client:dev-latest"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:22 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{pmm-client}		Pulled	Successfully pulled image "perconalab/pmm-client:dev-latest" in 10.357s (10.357s including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:22 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{pmm-client}		Created	Created container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:22 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{pmm-client}		Started	Started container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:24 +0000 UTC	Normal	Service monitoring-haproxy		EnsuredLoadBalancer	Ensured load balancer	service-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:33 +0000 UTC	Normal	Pod monitoring-orc-1		Scheduled	Successfully assigned kuttl-test-precious-dragon/monitoring-orc-1 to gke-jen-ps-428-6555938f--default-pool-39e5cfaa-qgt6	default-scheduler	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:33 +0000 UTC	Normal	Pod monitoring-orc-1.spec.initContainers{orc-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-428-6555938f"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:33 +0000 UTC	Normal	Pod monitoring-orc-1.spec.initContainers{orc-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-428-6555938f" in 111ms (111ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:33 +0000 UTC	Normal	Pod monitoring-orc-1.spec.initContainers{orc-init}		Created	Created container orc-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:33 +0000 UTC	Normal	Pod monitoring-orc-1.spec.initContainers{orc-init}		Started	Started container orc-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:33 +0000 UTC	Normal	StatefulSet.apps monitoring-orc		SuccessfulCreate	create Pod monitoring-orc-1 in StatefulSet monitoring-orc successful	statefulset-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:36 +0000 UTC	Normal	Pod monitoring-orc-1.spec.containers{orc}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:36 +0000 UTC	Normal	Pod monitoring-orc-1.spec.containers{orc}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 88ms (88ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:36 +0000 UTC	Normal	Pod monitoring-orc-1.spec.containers{orc}		Created	Created container orc	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:36 +0000 UTC	Normal	Pod monitoring-orc-1.spec.containers{orc}		Started	Started container orc	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:36 +0000 UTC	Normal	Pod monitoring-orc-1.spec.containers{mysql-monit}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:36 +0000 UTC	Normal	Pod monitoring-orc-1.spec.containers{mysql-monit}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 107ms (107ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:36 +0000 UTC	Normal	Pod monitoring-orc-1.spec.containers{mysql-monit}		Created	Created container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:36 +0000 UTC	Normal	Pod monitoring-orc-1.spec.containers{mysql-monit}		Started	Started container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:44 +0000 UTC	Normal	PersistentVolumeClaim datadir-monitoring-mysql-1		WaitForFirstConsumer	waiting for first consumer to be created before binding	persistentvolume-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:44 +0000 UTC	Normal	PersistentVolumeClaim datadir-monitoring-mysql-1		Provisioning	External provisioner is provisioning volume for claim "kuttl-test-precious-dragon/datadir-monitoring-mysql-1"	pd.csi.storage.gke.io_gke-fc9cc93e46f343588bb4-40f6-88d6-vm_778e273e-4616-461d-86ca-0c520e1f86f3	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:44 +0000 UTC	Normal	PersistentVolumeClaim datadir-monitoring-mysql-1		ExternalProvisioning	Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered.	persistentvolume-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:44 +0000 UTC	Normal	StatefulSet.apps monitoring-mysql		SuccessfulCreate	create Claim datadir-monitoring-mysql-1 Pod monitoring-mysql-1 in StatefulSet monitoring-mysql success	statefulset-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:44 +0000 UTC	Normal	StatefulSet.apps monitoring-mysql		SuccessfulCreate	create Pod monitoring-mysql-1 in StatefulSet monitoring-mysql successful	statefulset-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:48 +0000 UTC	Normal	PersistentVolumeClaim datadir-monitoring-mysql-1		ProvisioningSucceeded	Successfully provisioned volume pvc-0353b443-94a7-4028-8f7d-fd8ef0ca2598	pd.csi.storage.gke.io_gke-fc9cc93e46f343588bb4-40f6-88d6-vm_778e273e-4616-461d-86ca-0c520e1f86f3	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:48 +0000 UTC	Normal	Pod monitoring-mysql-1		Scheduled	Successfully assigned kuttl-test-precious-dragon/monitoring-mysql-1 to gke-jen-ps-428-6555938f--default-pool-39e5cfaa-1t0z	default-scheduler	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:53 +0000 UTC	Normal	Pod monitoring-mysql-1		SuccessfulAttachVolume	AttachVolume.Attach succeeded for volume "pvc-0353b443-94a7-4028-8f7d-fd8ef0ca2598" 	attachdetach-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:56 +0000 UTC	Normal	Pod monitoring-haproxy-0		Scheduled	Successfully assigned kuttl-test-precious-dragon/monitoring-haproxy-0 to gke-jen-ps-428-6555938f--default-pool-39e5cfaa-qgt6	default-scheduler	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:56 +0000 UTC	Normal	StatefulSet.apps monitoring-haproxy		SuccessfulCreate	create Pod monitoring-haproxy-0 in StatefulSet monitoring-haproxy successful	statefulset-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:57 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.initContainers{haproxy-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-428-6555938f"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:57 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.initContainers{haproxy-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-428-6555938f" in 162ms (162ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:57 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.initContainers{haproxy-init}		Created	Created container haproxy-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:57 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.initContainers{haproxy-init}		Started	Started container haproxy-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:59 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{haproxy}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:59 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{haproxy}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 105ms (105ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:59 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{haproxy}		Created	Created container haproxy	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:59 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{haproxy}		Started	Started container haproxy	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:59 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{mysql-monit}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:59 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{mysql-monit}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 94ms (94ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:59 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{mysql-monit}		Created	Created container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:59 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{mysql-monit}		Started	Started container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:59 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{pmm-client}		Pulling	Pulling image "perconalab/pmm-client:dev-latest"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:34:59 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.initContainers{mysql-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-428-6555938f"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:00 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.initContainers{mysql-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-428-6555938f" in 117ms (117ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:00 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.initContainers{mysql-init}		Created	Created container mysql-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:00 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.initContainers{mysql-init}		Started	Started container mysql-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:01 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{mysql}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-psmysql"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:01 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{mysql}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 82ms (82ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:01 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{mysql}		Created	Created container mysql	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:01 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{mysql}		Started	Started container mysql	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:01 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{xtrabackup}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-backup"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:01 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{xtrabackup}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 87ms (87ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:01 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{xtrabackup}		Created	Created container xtrabackup	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:01 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{xtrabackup}		Started	Started container xtrabackup	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:01 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{pt-heartbeat}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-toolkit"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:01 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{pt-heartbeat}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 91ms (91ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:01 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{pt-heartbeat}		Created	Created container pt-heartbeat	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:02 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{pt-heartbeat}		Started	Started container pt-heartbeat	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:02 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{pmm-client}		Pulling	Pulling image "perconalab/pmm-client:dev-latest"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:07 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{pmm-client}		Pulled	Successfully pulled image "perconalab/pmm-client:dev-latest" in 8.032s (8.032s including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:07 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{pmm-client}		Created	Created container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:07 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{pmm-client}		Started	Started container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:08 +0000 UTC	Normal	Pod monitoring-haproxy-1		Scheduled	Successfully assigned kuttl-test-precious-dragon/monitoring-haproxy-1 to gke-jen-ps-428-6555938f--default-pool-39e5cfaa-sz41	default-scheduler	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:08 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.initContainers{haproxy-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-428-6555938f"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:08 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.initContainers{haproxy-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-428-6555938f" in 155ms (156ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:08 +0000 UTC	Normal	StatefulSet.apps monitoring-haproxy		SuccessfulCreate	create Pod monitoring-haproxy-1 in StatefulSet monitoring-haproxy successful	statefulset-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:08 +0000 UTC	Normal	Pod monitoring-orc-2		Scheduled	Successfully assigned kuttl-test-precious-dragon/monitoring-orc-2 to gke-jen-ps-428-6555938f--default-pool-39e5cfaa-sz41	default-scheduler	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:08 +0000 UTC	Normal	StatefulSet.apps monitoring-orc		SuccessfulCreate	create Pod monitoring-orc-2 in StatefulSet monitoring-orc successful	statefulset-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:09 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.initContainers{haproxy-init}		Created	Created container haproxy-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:09 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.initContainers{haproxy-init}		Started	Started container haproxy-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:09 +0000 UTC	Normal	Pod monitoring-orc-2.spec.initContainers{orc-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-428-6555938f"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:09 +0000 UTC	Normal	Pod monitoring-orc-2.spec.initContainers{orc-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-428-6555938f" in 176ms (176ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:09 +0000 UTC	Normal	Pod monitoring-orc-2.spec.initContainers{orc-init}		Created	Created container orc-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:09 +0000 UTC	Normal	Pod monitoring-orc-2.spec.initContainers{orc-init}		Started	Started container orc-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:10 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{pmm-client}		Pulled	Successfully pulled image "perconalab/pmm-client:dev-latest" in 7.954s (7.954s including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:10 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{pmm-client}		Created	Created container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:10 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{pmm-client}		Started	Started container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:11 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{haproxy}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:11 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{haproxy}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 129ms (129ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:11 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{haproxy}		Created	Created container haproxy	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:11 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{haproxy}		Started	Started container haproxy	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:11 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{mysql-monit}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:11 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{mysql-monit}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 108ms (108ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:11 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{mysql-monit}		Created	Created container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:11 +0000 UTC	Normal	Pod monitoring-orc-2.spec.containers{orc}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:11 +0000 UTC	Normal	Pod monitoring-orc-2.spec.containers{orc}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 90ms (90ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:11 +0000 UTC	Normal	Pod monitoring-orc-2.spec.containers{orc}		Created	Created container orc	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:11 +0000 UTC	Normal	Pod monitoring-orc-2.spec.containers{orc}		Started	Started container orc	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:11 +0000 UTC	Normal	Pod monitoring-orc-2.spec.containers{mysql-monit}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:11 +0000 UTC	Normal	Pod monitoring-orc-2.spec.containers{mysql-monit}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 112ms (112ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:11 +0000 UTC	Normal	Pod monitoring-orc-2.spec.containers{mysql-monit}		Created	Created container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:12 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{mysql-monit}		Started	Started container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:12 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{pmm-client}		Pulling	Pulling image "perconalab/pmm-client:dev-latest"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:12 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{pmm-client}		Pulled	Successfully pulled image "perconalab/pmm-client:dev-latest" in 100ms (100ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:12 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{pmm-client}		Created	Created container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:12 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{pmm-client}		Started	Started container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:12 +0000 UTC	Normal	Pod monitoring-haproxy-2		Scheduled	Successfully assigned kuttl-test-precious-dragon/monitoring-haproxy-2 to gke-jen-ps-428-6555938f--default-pool-39e5cfaa-1t0z	default-scheduler	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:12 +0000 UTC	Normal	StatefulSet.apps monitoring-haproxy		SuccessfulCreate	create Pod monitoring-haproxy-2 in StatefulSet monitoring-haproxy successful	statefulset-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:12 +0000 UTC	Normal	Pod monitoring-orc-2.spec.containers{mysql-monit}		Started	Started container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:13 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.initContainers{haproxy-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-428-6555938f"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:13 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.initContainers{haproxy-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-428-6555938f" in 126ms (126ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:13 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.initContainers{haproxy-init}		Created	Created container haproxy-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:13 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.initContainers{haproxy-init}		Started	Started container haproxy-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:18 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{haproxy}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:18 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{haproxy}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 114ms (114ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:18 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{haproxy}		Created	Created container haproxy	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:18 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{haproxy}		Started	Started container haproxy	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:18 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{mysql-monit}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:18 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{mysql-monit}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 111ms (111ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:18 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{mysql-monit}		Created	Created container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:18 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{mysql-monit}		Started	Started container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:18 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{pmm-client}		Pulling	Pulling image "perconalab/pmm-client:dev-latest"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:19 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{pmm-client}		Pulled	Successfully pulled image "perconalab/pmm-client:dev-latest" in 132ms (133ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:19 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{pmm-client}		Created	Created container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:19 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{pmm-client}		Started	Started container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:19 +0000 UTC	Warning	Pod monitoring-mysql-1.spec.containers{mysql}		Unhealthy	Startup probe failed: 	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:20 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{mysql}		Killing	Container mysql failed startup probe, will be restarted	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:35:24 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{mysql}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 89ms (89ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:14 +0000 UTC	Normal	PersistentVolumeClaim datadir-monitoring-mysql-2		WaitForFirstConsumer	waiting for first consumer to be created before binding	persistentvolume-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:14 +0000 UTC	Normal	PersistentVolumeClaim datadir-monitoring-mysql-2		ExternalProvisioning	Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered.	persistentvolume-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:14 +0000 UTC	Normal	StatefulSet.apps monitoring-mysql		SuccessfulCreate	create Claim datadir-monitoring-mysql-2 Pod monitoring-mysql-2 in StatefulSet monitoring-mysql success	statefulset-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:14 +0000 UTC	Normal	StatefulSet.apps monitoring-mysql		SuccessfulCreate	create Pod monitoring-mysql-2 in StatefulSet monitoring-mysql successful	statefulset-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:15 +0000 UTC	Normal	PersistentVolumeClaim datadir-monitoring-mysql-2		Provisioning	External provisioner is provisioning volume for claim "kuttl-test-precious-dragon/datadir-monitoring-mysql-2"	pd.csi.storage.gke.io_gke-fc9cc93e46f343588bb4-40f6-88d6-vm_778e273e-4616-461d-86ca-0c520e1f86f3	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:18 +0000 UTC	Normal	PersistentVolumeClaim datadir-monitoring-mysql-2		ProvisioningSucceeded	Successfully provisioned volume pvc-9440f69f-ba0e-43a7-bd00-785eaadf7621	pd.csi.storage.gke.io_gke-fc9cc93e46f343588bb4-40f6-88d6-vm_778e273e-4616-461d-86ca-0c520e1f86f3	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:19 +0000 UTC	Normal	Pod monitoring-mysql-2		Scheduled	Successfully assigned kuttl-test-precious-dragon/monitoring-mysql-2 to gke-jen-ps-428-6555938f--default-pool-39e5cfaa-qgt6	default-scheduler	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:26 +0000 UTC	Normal	Pod monitoring-mysql-2		SuccessfulAttachVolume	AttachVolume.Attach succeeded for volume "pvc-9440f69f-ba0e-43a7-bd00-785eaadf7621" 	attachdetach-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:27 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.initContainers{mysql-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-428-6555938f"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:28 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.initContainers{mysql-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-428-6555938f" in 165ms (165ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:28 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.initContainers{mysql-init}		Created	Created container mysql-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:28 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.initContainers{mysql-init}		Started	Started container mysql-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:30 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{mysql}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-psmysql"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:30 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{mysql}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 108ms (108ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:30 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{mysql}		Created	Created container mysql	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:30 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{mysql}		Started	Started container mysql	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:30 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{xtrabackup}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-backup"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:30 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{xtrabackup}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 113ms (113ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:30 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{xtrabackup}		Created	Created container xtrabackup	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:30 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{xtrabackup}		Started	Started container xtrabackup	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:30 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{pt-heartbeat}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-toolkit"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:31 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{pt-heartbeat}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 101ms (101ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:31 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{pt-heartbeat}		Created	Created container pt-heartbeat	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:31 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{pt-heartbeat}		Started	Started container pt-heartbeat	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:31 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{pmm-client}		Pulling	Pulling image "perconalab/pmm-client:dev-latest"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:31 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{pmm-client}		Pulled	Successfully pulled image "perconalab/pmm-client:dev-latest" in 157ms (157ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:31 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{pmm-client}		Created	Created container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:31 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{pmm-client}		Started	Started container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:51 +0000 UTC	Warning	Pod monitoring-mysql-2.spec.containers{mysql}		Unhealthy	Startup probe failed: 	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:51 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{mysql}		Killing	Container mysql failed startup probe, will be restarted	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:36:54 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{mysql}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 76ms (76ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:50 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{haproxy}		Killing	Stopping container haproxy	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:50 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{mysql-monit}		Killing	Stopping container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:50 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{pmm-client}		Killing	Stopping container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:50 +0000 UTC	Normal	StatefulSet.apps monitoring-haproxy		SuccessfulDelete	delete Pod monitoring-haproxy-2 in StatefulSet monitoring-haproxy successful	statefulset-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:51 +0000 UTC	Normal	Pod monitoring-haproxy-2		Scheduled	Successfully assigned kuttl-test-precious-dragon/monitoring-haproxy-2 to gke-jen-ps-428-6555938f--default-pool-39e5cfaa-1t0z	default-scheduler	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:51 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.initContainers{haproxy-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-428-6555938f"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:51 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.initContainers{haproxy-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-428-6555938f" in 99ms (99ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:51 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.initContainers{haproxy-init}		Created	Created container haproxy-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:51 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.initContainers{haproxy-init}		Started	Started container haproxy-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:54 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{haproxy}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:54 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{haproxy}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 91ms (91ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:54 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{haproxy}		Created	Created container haproxy	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:54 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{haproxy}		Started	Started container haproxy	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:54 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{mysql-monit}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:54 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{mysql-monit}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 93ms (93ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:54 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{mysql-monit}		Created	Created container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:54 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{mysql-monit}		Started	Started container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:54 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{pmm-client}		Pulling	Pulling image "perconalab/pmm-client:dev-latest"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:54 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{pmm-client}		Pulled	Successfully pulled image "perconalab/pmm-client:dev-latest" in 130ms (130ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:54 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{pmm-client}		Created	Created container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:54 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{pmm-client}		Started	Started container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:55 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{haproxy}		Killing	Stopping container haproxy	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:55 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{pmm-client}		Killing	Stopping container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:55 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{mysql-monit}		Killing	Stopping container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:55 +0000 UTC	Normal	StatefulSet.apps monitoring-haproxy		SuccessfulDelete	delete Pod monitoring-haproxy-1 in StatefulSet monitoring-haproxy successful	statefulset-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:56 +0000 UTC	Normal	Pod monitoring-haproxy-1		Scheduled	Successfully assigned kuttl-test-precious-dragon/monitoring-haproxy-1 to gke-jen-ps-428-6555938f--default-pool-39e5cfaa-sz41	default-scheduler	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:56 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.initContainers{haproxy-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-428-6555938f"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:56 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.initContainers{haproxy-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-428-6555938f" in 92ms (92ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:56 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.initContainers{haproxy-init}		Created	Created container haproxy-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:57 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.initContainers{haproxy-init}		Started	Started container haproxy-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:59 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{haproxy}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:59 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{haproxy}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 98ms (98ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:59 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{haproxy}		Created	Created container haproxy	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:59 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{haproxy}		Started	Started container haproxy	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:59 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{mysql-monit}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:59 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{mysql-monit}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 82ms (82ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:59 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{mysql-monit}		Created	Created container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:59 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{mysql-monit}		Started	Started container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:59 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{pmm-client}		Pulling	Pulling image "perconalab/pmm-client:dev-latest"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:59 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{pmm-client}		Pulled	Successfully pulled image "perconalab/pmm-client:dev-latest" in 94ms (94ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:59 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{pmm-client}		Created	Created container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:59 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{pmm-client}		Started	Started container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:37:59 +0000 UTC	Normal	Pod monitoring-mysql-1		Scheduled	Successfully assigned kuttl-test-precious-dragon/monitoring-mysql-1 to gke-jen-ps-428-6555938f--default-pool-39e5cfaa-1t0z	default-scheduler	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:00 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{haproxy}		Killing	Stopping container haproxy	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:00 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{mysql-monit}		Killing	Stopping container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:00 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{pmm-client}		Killing	Stopping container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:00 +0000 UTC	Normal	StatefulSet.apps monitoring-haproxy		SuccessfulDelete	delete Pod monitoring-haproxy-0 in StatefulSet monitoring-haproxy successful	statefulset-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:03 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.initContainers{mysql-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-428-6555938f"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:03 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.initContainers{mysql-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-428-6555938f" in 92ms (92ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:03 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.initContainers{mysql-init}		Created	Created container mysql-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:03 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.initContainers{mysql-init}		Started	Started container mysql-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:06 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{mysql}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-psmysql"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:06 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{mysql}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 92ms (92ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:06 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{mysql}		Created	Created container mysql	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:06 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{mysql}		Started	Started container mysql	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:06 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{xtrabackup}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-backup"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:06 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{xtrabackup}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 96ms (96ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:06 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{xtrabackup}		Created	Created container xtrabackup	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:06 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{xtrabackup}		Started	Started container xtrabackup	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:06 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{pt-heartbeat}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-toolkit"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:06 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{pt-heartbeat}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 99ms (99ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:06 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{pt-heartbeat}		Created	Created container pt-heartbeat	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:06 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{pt-heartbeat}		Started	Started container pt-heartbeat	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:06 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{pmm-client}		Pulling	Pulling image "perconalab/pmm-client:dev-latest"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:06 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{pmm-client}		Pulled	Successfully pulled image "perconalab/pmm-client:dev-latest" in 100ms (100ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:07 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{pmm-client}		Created	Created container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:07 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{pmm-client}		Started	Started container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:11 +0000 UTC	Normal	Pod monitoring-haproxy-0		Scheduled	Successfully assigned kuttl-test-precious-dragon/monitoring-haproxy-0 to gke-jen-ps-428-6555938f--default-pool-39e5cfaa-qgt6	default-scheduler	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:11 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.initContainers{haproxy-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-428-6555938f"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:11 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.initContainers{haproxy-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-428-6555938f" in 108ms (108ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:11 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.initContainers{haproxy-init}		Created	Created container haproxy-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:11 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.initContainers{haproxy-init}		Started	Started container haproxy-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:13 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{haproxy}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:13 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{haproxy}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 90ms (90ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:13 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{haproxy}		Created	Created container haproxy	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:14 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{haproxy}		Started	Started container haproxy	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:14 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{mysql-monit}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:14 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{mysql-monit}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 84ms (84ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:14 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{mysql-monit}		Created	Created container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:14 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{mysql-monit}		Started	Started container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:14 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{pmm-client}		Pulling	Pulling image "perconalab/pmm-client:dev-latest"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:14 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{pmm-client}		Pulled	Successfully pulled image "perconalab/pmm-client:dev-latest" in 93ms (93ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:14 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{pmm-client}		Created	Created container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:14 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{pmm-client}		Started	Started container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:24 +0000 UTC	Warning	Pod monitoring-mysql-1.spec.containers{mysql}		Unhealthy	Startup probe failed: 	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:24 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{mysql}		Killing	Container mysql failed startup probe, will be restarted	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:38:28 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{mysql}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 88ms (88ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:04 +0000 UTC	Normal	Pod monitoring-mysql-2		Scheduled	Successfully assigned kuttl-test-precious-dragon/monitoring-mysql-2 to gke-jen-ps-428-6555938f--default-pool-39e5cfaa-qgt6	default-scheduler	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:08 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.initContainers{mysql-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-428-6555938f"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:08 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.initContainers{mysql-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-428-6555938f" in 99ms (99ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:08 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.initContainers{mysql-init}		Created	Created container mysql-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:08 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.initContainers{mysql-init}		Started	Started container mysql-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:11 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{mysql}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-psmysql"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:11 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{mysql}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 117ms (117ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:11 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{mysql}		Created	Created container mysql	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:11 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{mysql}		Started	Started container mysql	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:11 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{xtrabackup}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-backup"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:11 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{xtrabackup}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 133ms (133ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:11 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{xtrabackup}		Created	Created container xtrabackup	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:11 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{xtrabackup}		Started	Started container xtrabackup	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:11 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{pt-heartbeat}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-toolkit"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:11 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{pt-heartbeat}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 91ms (91ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:11 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{pt-heartbeat}		Created	Created container pt-heartbeat	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:11 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{pt-heartbeat}		Started	Started container pt-heartbeat	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:11 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{pmm-client}		Pulling	Pulling image "perconalab/pmm-client:dev-latest"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:12 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{pmm-client}		Pulled	Successfully pulled image "perconalab/pmm-client:dev-latest" in 146ms (146ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:12 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{pmm-client}		Created	Created container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:12 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{pmm-client}		Started	Started container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:29 +0000 UTC	Warning	Pod monitoring-mysql-2.spec.containers{mysql}		Unhealthy	Startup probe failed: 	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:29 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{mysql}		Killing	Container mysql failed startup probe, will be restarted	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:39:33 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{mysql}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 99ms (99ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:05 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{mysql}		Killing	Stopping container mysql	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:05 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{pmm-client}		Killing	Stopping container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:05 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{xtrabackup}		Killing	Stopping container xtrabackup	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:05 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{pt-heartbeat}		Killing	Stopping container pt-heartbeat	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:09 +0000 UTC	Warning	Pod monitoring-mysql-0.spec.containers{mysql}		Unhealthy	Readiness probe failed: 2024/08/07 10:40:09 readiness check failed: connect to db: ping DB: dial tcp 10.235.1.25:33062: connect: connection refused	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:14 +0000 UTC	Warning	Pod monitoring-mysql-0.spec.containers{mysql}		Unhealthy	Readiness probe failed: 2024/08/07 10:40:14 readiness check failed: connect to db: ping DB: dial tcp 10.235.1.25:33062: connect: connection refused	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:25 +0000 UTC	Normal	Pod monitoring-mysql-0		Scheduled	Successfully assigned kuttl-test-precious-dragon/monitoring-mysql-0 to gke-jen-ps-428-6555938f--default-pool-39e5cfaa-sz41	default-scheduler	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:26 +0000 UTC	Warning	Pod monitoring-mysql-0		FailedMount	Unable to attach or mount volumes: unmounted volumes=[config datadir], unattached volumes=[], failed to process volumes=[datadir]: error processing PVC kuttl-test-precious-dragon/datadir-monitoring-mysql-0: failed to fetch PVC from API server: persistentvolumeclaims "datadir-monitoring-mysql-0" is forbidden: User "system:node:gke-jen-ps-428-6555938f--default-pool-39e5cfaa-sz41" cannot get resource "persistentvolumeclaims" in API group "" in the namespace "kuttl-test-precious-dragon": no relationship found between node 'gke-jen-ps-428-6555938f--default-pool-39e5cfaa-sz41' and this object	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:27 +0000 UTC	Warning	Pod monitoring-mysql-0		FailedMount	MountVolume.SetUp failed for volume "config" : failed to sync configmap cache: timed out waiting for the condition	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:42 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.initContainers{mysql-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-428-6555938f"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:42 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.initContainers{mysql-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-428-6555938f" in 101ms (101ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:42 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.initContainers{mysql-init}		Created	Created container mysql-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:43 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.initContainers{mysql-init}		Started	Started container mysql-init	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:44 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{mysql}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-psmysql"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:44 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{mysql}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 116ms (116ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:44 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{mysql}		Created	Created container mysql	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:44 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{mysql}		Started	Started container mysql	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:44 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{xtrabackup}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-backup"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:44 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{xtrabackup}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 113ms (113ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:44 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{xtrabackup}		Created	Created container xtrabackup	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:45 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{xtrabackup}		Started	Started container xtrabackup	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:45 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{pt-heartbeat}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-toolkit"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:45 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{pt-heartbeat}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 225ms (225ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:45 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{pt-heartbeat}		Created	Created container pt-heartbeat	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:45 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{pt-heartbeat}		Started	Started container pt-heartbeat	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:45 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{pmm-client}		Pulling	Pulling image "perconalab/pmm-client:dev-latest"	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:45 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{pmm-client}		Pulled	Successfully pulled image "perconalab/pmm-client:dev-latest" in 102ms (102ms including waiting)	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:45 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{pmm-client}		Created	Created container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:40:45 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{pmm-client}		Started	Started container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:41:03 +0000 UTC	Warning	Pod monitoring-mysql-0.spec.containers{mysql}		Unhealthy	Startup probe failed: 	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:41:03 +0000 UTC	Normal	Pod monitoring-mysql-0.spec.containers{mysql}		Killing	Container mysql failed startup probe, will be restarted	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:45:09 +0000 UTC	Normal	Service monitoring-haproxy		DeletingLoadBalancer	Deleting load balancer	service-controller	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:45:10 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{haproxy}		Killing	Stopping container haproxy	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:45:10 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{pmm-client}		Killing	Stopping container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:45:10 +0000 UTC	Normal	Pod monitoring-haproxy-0.spec.containers{mysql-monit}		Killing	Stopping container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:45:10 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{haproxy}		Killing	Stopping container haproxy	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:45:10 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{mysql-monit}		Killing	Stopping container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:45:10 +0000 UTC	Normal	Pod monitoring-haproxy-1.spec.containers{pmm-client}		Killing	Stopping container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:45:10 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{haproxy}		Killing	Stopping container haproxy	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:45:10 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{pmm-client}		Killing	Stopping container pmm-client	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:45:10 +0000 UTC	Normal	Pod monitoring-haproxy-2.spec.containers{mysql-monit}		Killing	Stopping container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:45:10 +0000 UTC	Normal	Pod monitoring-mysql-1.spec.containers{xtrabackup}		Killing	Stopping container xtrabackup	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:45:10 +0000 UTC	Normal	Pod monitoring-mysql-2.spec.containers{xtrabackup}		Killing	Stopping container xtrabackup	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:45:10 +0000 UTC	Normal	Pod monitoring-orc-0.spec.containers{orc}		Killing	Stopping container orc	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:45:10 +0000 UTC	Normal	Pod monitoring-orc-0.spec.containers{mysql-monit}		Killing	Stopping container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:45:10 +0000 UTC	Normal	Pod monitoring-orc-1.spec.containers{orc}		Killing	Stopping container orc	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:45:10 +0000 UTC	Normal	Pod monitoring-orc-1.spec.containers{mysql-monit}		Killing	Stopping container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:45:10 +0000 UTC	Normal	Pod monitoring-orc-2.spec.containers{orc}		Killing	Stopping container orc	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:45:10 +0000 UTC	Normal	Pod monitoring-orc-2.spec.containers{mysql-monit}		Killing	Stopping container mysql-monit	kubelet	
    logger.go:42: 10:45:11 | monitoring | 2024-08-07 10:45:10 +0000 UTC	Normal	Pod percona-server-mysql-operator-547b9d576d-75gml.spec.containers{manager}		Killing	Stopping container manager	kubelet	
    logger.go:42: 10:45:11 | monitoring | Deleting namespace: kuttl-test-precious-dragon
=== NAME  kuttl
    harness.go:407: run tests finished
    harness.go:515: cleaning up
    harness.go:572: removing temp folder: ""
--- PASS: kuttl (846.90s)
    --- PASS: kuttl/harness (0.00s)
        --- PASS: kuttl/harness/monitoring (846.47s)
PASS