=== RUN   kuttl
    harness.go:462: starting setup
    harness.go:252: running tests using configured kubeconfig.
    harness.go:275: Successful connection to cluster at: https://34.31.227.138
    harness.go:360: running tests
    harness.go:73: going to run test suite with timeout of 180 seconds for each step
    harness.go:372: testsuite: e2e-tests/tests has 30 tests
=== RUN   kuttl/harness
=== RUN   kuttl/harness/monitoring
=== PAUSE kuttl/harness/monitoring
=== CONT  kuttl/harness/monitoring
    logger.go:42: 14:42:34 | monitoring | Creating namespace: kuttl-test-concrete-adder
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | starting test step 0-deploy-operator
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | running command: [sh -c set -o errexit set -o xtrace source ../../functions init_temp_dir # do this only in the first TestStep deploy_operator deploy_non_tls_cluster_secrets deploy_tls_cluster_secrets deploy_client]
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | + source ../../functions
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ realpath ../../..
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | ++++ pwd
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/tests/monitoring
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | ++ test_name=monitoring
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/vars.sh
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ export GIT_BRANCH=PR-523
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ GIT_BRANCH=PR-523
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ export VERSION=PR-523-f00253e
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ VERSION=PR-523-f00253e
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ export PMM_SERVER_VERSION=9.9.9
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ PMM_SERVER_VERSION=9.9.9
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | ++++ which gdate
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-523/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | ++++ which date
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ date=/usr/bin/date
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ command -v oc
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ kubectl get nodes
    logger.go:42: 14:42:34 | monitoring/0-deploy-operator | +++ grep '^minikube'
    logger.go:42: 14:42:35 | monitoring/0-deploy-operator | + init_temp_dir
    logger.go:42: 14:42:35 | monitoring/0-deploy-operator | + rm -rf /tmp/kuttl/ps/monitoring
    logger.go:42: 14:42:35 | monitoring/0-deploy-operator | + mkdir -p /tmp/kuttl/ps/monitoring
    logger.go:42: 14:42:35 | monitoring/0-deploy-operator | + deploy_operator
    logger.go:42: 14:42:35 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-concrete-adder apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy/crd.yaml
    logger.go:42: 14:42:36 | monitoring/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
    logger.go:42: 14:42:36 | monitoring/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
    logger.go:42: 14:42:37 | monitoring/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
    logger.go:42: 14:42:37 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-concrete-adder apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy/rbac.yaml
    logger.go:42: 14:42:38 | monitoring/0-deploy-operator | serviceaccount/percona-server-mysql-operator created
    logger.go:42: 14:42:38 | monitoring/0-deploy-operator | serviceaccount/percona-server-mysql-operator-orchestrator created
    logger.go:42: 14:42:39 | monitoring/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
    logger.go:42: 14:42:39 | monitoring/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator created
    logger.go:42: 14:42:39 | monitoring/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-orchestrator created
    logger.go:42: 14:42:40 | monitoring/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
    logger.go:42: 14:42:40 | monitoring/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator created
    logger.go:42: 14:42:40 | monitoring/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-orchestrator created
    logger.go:42: 14:42:40 | monitoring/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"'
    logger.go:42: 14:42:40 | monitoring/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"'
    logger.go:42: 14:42:40 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-concrete-adder apply -f -
    logger.go:42: 14:42:40 | monitoring/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-523-f00253e
    logger.go:42: 14:42:40 | monitoring/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-523-f00253e"' /mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy/operator.yaml
    logger.go:42: 14:42:41 | monitoring/0-deploy-operator | configmap/percona-server-mysql-operator-config created
    logger.go:42: 14:42:42 | monitoring/0-deploy-operator | deployment.apps/percona-server-mysql-operator created
    logger.go:42: 14:42:42 | monitoring/0-deploy-operator | + deploy_non_tls_cluster_secrets
    logger.go:42: 14:42:42 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-concrete-adder apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf/secrets.yaml
    logger.go:42: 14:42:43 | monitoring/0-deploy-operator | secret/test-secrets created
    logger.go:42: 14:42:43 | monitoring/0-deploy-operator | + deploy_tls_cluster_secrets
    logger.go:42: 14:42:43 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-concrete-adder apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf/ssl-secret.yaml
    logger.go:42: 14:42:43 | monitoring/0-deploy-operator | secret/test-ssl created
    logger.go:42: 14:42:43 | monitoring/0-deploy-operator | + deploy_client
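The interleaved yq/kubectl lines above are deploy_operator patching deploy/operator.yaml before applying it; xtrace prints each stage of a single pipeline as it starts, which scrambles the order. Reassembled, the pipeline is roughly the following (a sketch reconstructed from the trace, not the verbatim helper from e2e-tests/functions):

    # operator.yaml is a multi-document manifest; documentIndex==1 selects the
    # operator Deployment. Pin the image to the PR build, disable telemetry,
    # and raise the log level, then apply to the test namespace.
    yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-523-f00253e"' \
        /mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy/operator.yaml \
      | yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' \
      | yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"' \
      | kubectl -n kuttl-test-concrete-adder apply -f -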
    logger.go:42: 14:42:43 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-concrete-adder apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf/client.yaml
    logger.go:42: 14:42:44 | monitoring/0-deploy-operator | pod/mysql-client created
    logger.go:42: 14:42:53 | monitoring/0-deploy-operator | test step completed 0-deploy-operator
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | starting test step 1-deploy-pmm-server
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | running command: [sh -c set -o errexit set -o xtrace source ../../functions deploy_pmm_server sleep 30 # wait for PMM Server to start API_KEY=$(get_pmm_api_key) kubectl patch -n "${NAMESPACE}" secret test-secrets --type merge --patch '{"stringData": {"pmmserverkey": '$API_KEY'}}']
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | + source ../../functions
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ realpath ../../..
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | ++++ pwd
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/tests/monitoring
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | ++ test_name=monitoring
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/vars.sh
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ export GIT_BRANCH=PR-523
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ GIT_BRANCH=PR-523
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ export VERSION=PR-523-f00253e
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ VERSION=PR-523-f00253e
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ export PMM_SERVER_VERSION=9.9.9
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ PMM_SERVER_VERSION=9.9.9
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | ++++ which gdate
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-523/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | ++++ which date
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ date=/usr/bin/date
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ command -v oc
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ kubectl get nodes
    logger.go:42: 14:42:53 | monitoring/1-deploy-pmm-server | +++ grep '^minikube'
    logger.go:42: 14:42:54 | monitoring/1-deploy-pmm-server | + deploy_pmm_server
    logger.go:42: 14:42:54 | monitoring/1-deploy-pmm-server | + [[ -n '' ]]
    logger.go:42: 14:42:54 | monitoring/1-deploy-pmm-server | + helm install monitoring -n kuttl-test-concrete-adder --set imageTag=dev-latest --set imageRepo=perconalab/pmm-server https://percona-charts.storage.googleapis.com/pmm-server-9.9.9.tgz
    logger.go:42: 14:42:54 | monitoring/1-deploy-pmm-server | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-523/kubeconfig
    logger.go:42: 14:42:54 | monitoring/1-deploy-pmm-server | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-523/kubeconfig
    logger.go:42: 14:42:56 | monitoring/1-deploy-pmm-server | NAME: monitoring
    logger.go:42: 14:42:56 | monitoring/1-deploy-pmm-server | LAST DEPLOYED: Mon Feb 5 14:42:55 2024
    logger.go:42: 14:42:56 | monitoring/1-deploy-pmm-server | NAMESPACE: kuttl-test-concrete-adder
    logger.go:42: 14:42:56 | monitoring/1-deploy-pmm-server | STATUS: deployed
    logger.go:42: 14:42:56 | monitoring/1-deploy-pmm-server | REVISION: 1
    logger.go:42: 14:42:56 | monitoring/1-deploy-pmm-server | TEST SUITE: None
    logger.go:42: 14:42:56 | monitoring/1-deploy-pmm-server | NOTES:
    logger.go:42: 14:42:56 | monitoring/1-deploy-pmm-server | PMM server can be accessed via HTTPS (port 443) on the following DNS name from within your cluster:
    logger.go:42: 14:42:56 | monitoring/1-deploy-pmm-server |
    logger.go:42: 14:42:56 | monitoring/1-deploy-pmm-server | endpoint: https://monitoring-service.kuttl-test-concrete-adder.svc.cluster.local:443
    logger.go:42: 14:42:56 | monitoring/1-deploy-pmm-server | login: admin
    logger.go:42: 14:42:56 | monitoring/1-deploy-pmm-server | password: admin
    logger.go:42: 14:42:56 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-concrete-adder exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
    logger.go:42: 14:42:57 | monitoring/1-deploy-pmm-server | Error from server (BadRequest): pod monitoring-0 does not have a host assigned
    logger.go:42: 14:42:57 | monitoring/1-deploy-pmm-server | + echo 'Retry '
    logger.go:42: 14:42:57 | monitoring/1-deploy-pmm-server | Retry
    logger.go:42: 14:42:57 | monitoring/1-deploy-pmm-server | + sleep 5
    logger.go:42: 14:43:02 | monitoring/1-deploy-pmm-server | + let retry+=1
    logger.go:42: 14:43:02 | monitoring/1-deploy-pmm-server | + '[' 1 -ge 20 ']'
    logger.go:42: 14:43:02 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-concrete-adder exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
    logger.go:42: 14:43:03 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
    logger.go:42: 14:43:03 | monitoring/1-deploy-pmm-server | + echo 'Retry 1'
    logger.go:42: 14:43:03 | monitoring/1-deploy-pmm-server | Retry 1
    logger.go:42: 14:43:03 | monitoring/1-deploy-pmm-server | + sleep 5
    logger.go:42: 14:43:08 | monitoring/1-deploy-pmm-server | + let retry+=1
    logger.go:42: 14:43:08 | monitoring/1-deploy-pmm-server | + '[' 2 -ge 20 ']'
    logger.go:42: 14:43:08 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-concrete-adder exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
    logger.go:42: 14:43:08 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
    logger.go:42: 14:43:08 | monitoring/1-deploy-pmm-server | + echo 'Retry 2'
    logger.go:42: 14:43:08 | monitoring/1-deploy-pmm-server | Retry 2
    logger.go:42: 14:43:08 | monitoring/1-deploy-pmm-server | + sleep 5
    logger.go:42: 14:43:13 | monitoring/1-deploy-pmm-server | + let retry+=1
    logger.go:42: 14:43:13 | monitoring/1-deploy-pmm-server | + '[' 3 -ge 20 ']'
    logger.go:42: 14:43:13 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-concrete-adder exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
    logger.go:42: 14:43:14 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
    logger.go:42: 14:43:14 | monitoring/1-deploy-pmm-server | + echo 'Retry 3'
    logger.go:42: 14:43:14 | monitoring/1-deploy-pmm-server | Retry 3
    logger.go:42: 14:43:14 | monitoring/1-deploy-pmm-server | + sleep 5
    logger.go:42: 14:43:19 | monitoring/1-deploy-pmm-server | + let retry+=1
    logger.go:42: 14:43:19 | monitoring/1-deploy-pmm-server | + '[' 4 -ge 20 ']'
    logger.go:42: 14:43:19 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-concrete-adder exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
    logger.go:42: 14:43:20 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
    logger.go:42: 14:43:20 | monitoring/1-deploy-pmm-server | + echo 'Retry 4'
    logger.go:42: 14:43:20 | monitoring/1-deploy-pmm-server | Retry 4
    logger.go:42: 14:43:20 | monitoring/1-deploy-pmm-server | + sleep 5
    logger.go:42: 14:43:25 | monitoring/1-deploy-pmm-server | + let retry+=1
    logger.go:42: 14:43:25 | monitoring/1-deploy-pmm-server | + '[' 5 -ge 20 ']'
    logger.go:42: 14:43:25 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-concrete-adder exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
    logger.go:42: 14:43:26 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
    logger.go:42: 14:43:26 | monitoring/1-deploy-pmm-server | + echo 'Retry 5'
    logger.go:42: 14:43:26 | monitoring/1-deploy-pmm-server | Retry 5
    logger.go:42: 14:43:26 | monitoring/1-deploy-pmm-server | + sleep 5
    logger.go:42: 14:43:31 | monitoring/1-deploy-pmm-server | + let retry+=1
    logger.go:42: 14:43:31 | monitoring/1-deploy-pmm-server | + '[' 6 -ge 20 ']'
    logger.go:42: 14:43:31 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-concrete-adder exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
    logger.go:42: 14:43:31 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
    logger.go:42: 14:43:31 | monitoring/1-deploy-pmm-server | + echo 'Retry 6'
    logger.go:42: 14:43:31 | monitoring/1-deploy-pmm-server | Retry 6
    logger.go:42: 14:43:31 | monitoring/1-deploy-pmm-server | + sleep 5
    logger.go:42: 14:43:36 | monitoring/1-deploy-pmm-server | + let retry+=1
    logger.go:42: 14:43:36 | monitoring/1-deploy-pmm-server | + '[' 7 -ge 20 ']'
    logger.go:42: 14:43:36 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-concrete-adder exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
    logger.go:42: 14:43:37 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
    logger.go:42: 14:43:37 | monitoring/1-deploy-pmm-server | + echo 'Retry 7'
    logger.go:42: 14:43:37 | monitoring/1-deploy-pmm-server | Retry 7
    logger.go:42: 14:43:37 | monitoring/1-deploy-pmm-server | + sleep 5
    logger.go:42: 14:43:42 | monitoring/1-deploy-pmm-server | + let retry+=1
    logger.go:42: 14:43:42 | monitoring/1-deploy-pmm-server | + '[' 8 -ge 20 ']'
    logger.go:42: 14:43:42 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-concrete-adder exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
    logger.go:42: 14:43:43 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("monitoring")
    logger.go:42: 14:43:43 | monitoring/1-deploy-pmm-server | + echo 'Retry 8'
    logger.go:42: 14:43:43 | monitoring/1-deploy-pmm-server | Retry 8
    logger.go:42: 14:43:43 | monitoring/1-deploy-pmm-server | + sleep 5
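Each Retry block above is one pass of a readiness wait inside deploy_pmm_server: PMM Server counts as up once a postgres process is visible inside the monitoring-0 container. Pieced together from the trace, the loop behaves approximately like this (a reconstruction, not the verbatim helper; the loop variable and failure message are assumptions):

    retry=0
    # poll the PMM Server pod until its embedded PostgreSQL is running,
    # giving up after 20 attempts spaced 5 seconds apart
    until kubectl -n kuttl-test-concrete-adder exec monitoring-0 -- \
            bash -c 'ls -l /proc/*/exe 2>/dev/null | grep postgres >/dev/null'; do
        echo "Retry $retry"
        sleep 5
        let retry+=1
        if [ "$retry" -ge 20 ]; then
            echo "PMM Server did not start in time" >&2
            exit 1
        fi
    done

The early failures differ in kind: before the pod is scheduled, kubectl exec reports "does not have a host assigned", and afterwards "container not found" until the container actually starts; both simply count as another retry.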
    logger.go:42: 14:43:48 | monitoring/1-deploy-pmm-server | + let retry+=1
    logger.go:42: 14:43:48 | monitoring/1-deploy-pmm-server | + '[' 9 -ge 20 ']'
    logger.go:42: 14:43:48 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-concrete-adder exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null'
    logger.go:42: 14:43:49 | monitoring/1-deploy-pmm-server | + sleep 30
    logger.go:42: 14:44:19 | monitoring/1-deploy-pmm-server | ++ get_pmm_api_key
    logger.go:42: 14:44:19 | monitoring/1-deploy-pmm-server | ++ local key_name=
    logger.go:42: 14:44:19 | monitoring/1-deploy-pmm-server | ++ [[ -z '' ]]
    logger.go:42: 14:44:19 | monitoring/1-deploy-pmm-server | ++ key_name=operator
    logger.go:42: 14:44:19 | monitoring/1-deploy-pmm-server | ++ local ADMIN_PASSWORD
    logger.go:42: 14:44:19 | monitoring/1-deploy-pmm-server | +++ kubectl -n kuttl-test-concrete-adder exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2'
    logger.go:42: 14:44:20 | monitoring/1-deploy-pmm-server | ++ ADMIN_PASSWORD=admin
    logger.go:42: 14:44:20 | monitoring/1-deploy-pmm-server | ++ jq .key
    logger.go:42: 14:44:20 | monitoring/1-deploy-pmm-server | +++ get_service_ip monitoring-service
    logger.go:42: 14:44:20 | monitoring/1-deploy-pmm-server | +++ local service=monitoring-service
    logger.go:42: 14:44:20 | monitoring/1-deploy-pmm-server | +++ kubectl get service/monitoring-service -n kuttl-test-concrete-adder -o 'jsonpath={.spec.type}'
    logger.go:42: 14:44:20 | monitoring/1-deploy-pmm-server | +++ grep -q NotFound
    logger.go:42: 14:44:20 | monitoring/1-deploy-pmm-server | ++++ kubectl get service/monitoring-service -n kuttl-test-concrete-adder -o 'jsonpath={.spec.type}'
    logger.go:42: 14:44:21 | monitoring/1-deploy-pmm-server | +++ '[' LoadBalancer = ClusterIP ']'
    logger.go:42: 14:44:21 | monitoring/1-deploy-pmm-server | +++ kubectl get service/monitoring-service -n kuttl-test-concrete-adder -o 'jsonpath={.status.loadBalancer.ingress[]}'
    logger.go:42: 14:44:21 | monitoring/1-deploy-pmm-server | +++ egrep -q 'hostname|ip'
    logger.go:42: 14:44:21 | monitoring/1-deploy-pmm-server | +++ kubectl get service/monitoring-service -n kuttl-test-concrete-adder -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
    logger.go:42: 14:44:22 | monitoring/1-deploy-pmm-server | +++ kubectl get service/monitoring-service -n kuttl-test-concrete-adder -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
    logger.go:42: 14:44:22 | monitoring/1-deploy-pmm-server | ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator", "role": "Admin"}' https://admin:admin@34.123.89.122/graph/api/auth/keys
    logger.go:42: 14:44:22 | monitoring/1-deploy-pmm-server | % Total % Received % Xferd Average Speed Time Time Time Current
    logger.go:42: 14:44:22 | monitoring/1-deploy-pmm-server | Dload Upload Total Spent Left Speed
    logger.go:42: 14:44:30 | monitoring/1-deploy-pmm-server | 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:01 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:02 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:03 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:04 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:05 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:06 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:07 --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- 0:00:07 --:--:-- 0 100 155 100 119 100 36 15 4 0:00:09 0:00:07 0:00:02 43
    logger.go:42: 14:44:30 | monitoring/1-deploy-pmm-server | + API_KEY='"eyJrIjoiWVRuR2pTaDdybVZ0SWRzQVVMOG9UcENCQ09qWUhvTE8iLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="'
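API_KEY is produced by get_pmm_api_key, whose trace is interleaved above: it reads the admin password out of the monitoring-0 pod, resolves the LoadBalancer address of monitoring-service via get_service_ip, and creates a Grafana API key through PMM's auth endpoint. A consolidated sketch reconstructed from the trace (function structure assumed; the individual commands are from the log):

    get_pmm_api_key() {
        local key_name=${1:-operator}
        local ADMIN_PASSWORD
        ADMIN_PASSWORD=$(kubectl -n "${NAMESPACE}" exec monitoring-0 -- \
            bash -c 'printenv | grep ADMIN_PASSWORD | cut -d = -f2')
        # create an Admin-role API key named $key_name and print it
        curl --insecure -X POST -H 'Content-Type: application/json' \
            -d '{"name":"'"${key_name}"'", "role": "Admin"}' \
            "https://admin:${ADMIN_PASSWORD}@$(get_service_ip monitoring-service)/graph/api/auth/keys" \
            | jq .key
    }

Note that jq .key prints the key with its surrounding double quotes, which is why API_KEY above is '"eyJ..."' and why the kubectl patch that follows can splice it into the JSON patch body unquoted.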
    logger.go:42: 14:44:30 | monitoring/1-deploy-pmm-server | + kubectl patch -n kuttl-test-concrete-adder secret test-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoiWVRuR2pTaDdybVZ0SWRzQVVMOG9UcENCQ09qWUhvTE8iLCJuIjoib3BlcmF0b3IiLCJpZCI6MX0="}}'
    logger.go:42: 14:44:30 | monitoring/1-deploy-pmm-server | secret/test-secrets patched
    logger.go:42: 14:44:32 | monitoring/1-deploy-pmm-server | test step completed 1-deploy-pmm-server
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | starting test step 2-create-cluster
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | running command: [sh -c set -o errexit set -o xtrace source ../../functions get_cr \ | yq eval '.spec.mysql.clusterType="async"' - \ | yq eval '.spec.pmm.enabled = true' - \ | yq eval '.spec.proxy.haproxy.enabled = true' - \ | yq eval '.spec.proxy.haproxy.expose.type = "LoadBalancer"' - \ | kubectl -n "${NAMESPACE}" apply -f -]
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + source ../../functions
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ realpath ../../..
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | ++++ pwd
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/tests/monitoring
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | ++ test_name=monitoring
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/vars.sh
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ export GIT_BRANCH=PR-523
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ GIT_BRANCH=PR-523
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ export VERSION=PR-523-f00253e
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ VERSION=PR-523-f00253e
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ export PMM_SERVER_VERSION=9.9.9
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ PMM_SERVER_VERSION=9.9.9
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | ++++ which gdate
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-523/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | ++++ which date
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ date=/usr/bin/date
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ command -v oc
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ kubectl get nodes
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | +++ grep '^minikube'
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + get_cr
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + local name_suffix=
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + yq eval '.spec.pmm.enabled = true' -
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + yq eval '.spec.proxy.haproxy.enabled = true' -
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | ++ printf '.metadata.name="%s"' monitoring
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + yq eval '.metadata.name="monitoring"' /mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy/cr.yaml
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + kubectl -n kuttl-test-concrete-adder apply -f -
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + yq eval '.spec.proxy.haproxy.expose.type = "LoadBalancer"' -
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + yq eval '.spec.secretsName="test-secrets"' -
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + '[' -n '' ']'
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + yq eval -
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' -
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' -
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:dev-latest
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:dev-latest"' -
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-523-f00253e
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-523-f00253e"' -
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' -
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' -
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 14:44:32 | monitoring/2-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' -
    logger.go:42: 14:44:33 | monitoring/2-create-cluster | perconaservermysql.ps.percona.com/monitoring created
    logger.go:42: 14:48:04 | monitoring/2-create-cluster | test step completed 2-create-cluster
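Step 2 renders the custom resource by piping deploy/cr.yaml through a chain of yq edits; the trace shows the stages out of order because xtrace prints each stage of the pipeline as it starts. Reassembled, the step is equivalent to:

    # get_cr (from e2e-tests/functions) sets .metadata.name, .spec.secretsName,
    # .spec.sslSecretName, .spec.upgradeOptions.apply and the per-component
    # images on deploy/cr.yaml; the test step then layers its own options:
    get_cr \
      | yq eval '.spec.mysql.clusterType="async"' - \
      | yq eval '.spec.pmm.enabled = true' - \
      | yq eval '.spec.proxy.haproxy.enabled = true' - \
      | yq eval '.spec.proxy.haproxy.expose.type = "LoadBalancer"' - \
      | kubectl -n "${NAMESPACE}" apply -f -

Exposing HAProxy through a LoadBalancer is what later lets step 4 reach its metrics endpoint on port 8404 from outside the cluster.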
'{"stringData": {"pmmserverkey": '$API_KEY_NEW'}}' # delete old PMM key delete_pmm_api_key "operator" sleep 10] logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | + source ../../functions logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ realpath ../../.. logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | ++++ pwd logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/tests/monitoring logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | ++ test_name=monitoring logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/vars.sh logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ export GIT_BRANCH=PR-523 logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ GIT_BRANCH=PR-523 logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ export VERSION=PR-523-f00253e logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ VERSION=PR-523-f00253e logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:48:04 | 
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ export PMM_SERVER_VERSION=9.9.9
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ PMM_SERVER_VERSION=9.9.9
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | ++++ which gdate
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-523/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | ++++ which date
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ date=/usr/bin/date
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ command -v oc
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ kubectl get nodes
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ grep '^minikube'
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | ++ get_pmm_api_key operator-new
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | ++ local key_name=operator-new
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | ++ [[ -z operator-new ]]
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | ++ local ADMIN_PASSWORD
    logger.go:42: 14:48:04 | monitoring/3-rotate-pmm-key | +++ kubectl -n kuttl-test-concrete-adder exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2'
    logger.go:42: 14:48:06 | monitoring/3-rotate-pmm-key | ++ ADMIN_PASSWORD=admin
    logger.go:42: 14:48:06 | monitoring/3-rotate-pmm-key | ++ jq .key
    logger.go:42: 14:48:06 | monitoring/3-rotate-pmm-key | +++ get_service_ip monitoring-service
    logger.go:42: 14:48:06 | monitoring/3-rotate-pmm-key | +++ local service=monitoring-service
    logger.go:42: 14:48:06 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-concrete-adder -o 'jsonpath={.spec.type}'
    logger.go:42: 14:48:06 | monitoring/3-rotate-pmm-key | +++ grep -q NotFound
    logger.go:42: 14:48:06 | monitoring/3-rotate-pmm-key | ++++ kubectl get service/monitoring-service -n kuttl-test-concrete-adder -o 'jsonpath={.spec.type}'
    logger.go:42: 14:48:06 | monitoring/3-rotate-pmm-key | +++ '[' LoadBalancer = ClusterIP ']'
    logger.go:42: 14:48:06 | monitoring/3-rotate-pmm-key | +++ egrep -q 'hostname|ip'
    logger.go:42: 14:48:06 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-concrete-adder -o 'jsonpath={.status.loadBalancer.ingress[]}'
    logger.go:42: 14:48:07 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-concrete-adder -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
    logger.go:42: 14:48:07 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-concrete-adder -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
    logger.go:42: 14:48:07 | monitoring/3-rotate-pmm-key | ++ curl --insecure -X POST -H 'Content-Type: application/json' -d '{"name":"operator-new", "role": "Admin"}' https://admin:admin@34.123.89.122/graph/api/auth/keys
    logger.go:42: 14:48:07 | monitoring/3-rotate-pmm-key | % Total % Received % Xferd Average Speed Time Time Time Current
    logger.go:42: 14:48:07 | monitoring/3-rotate-pmm-key | Dload Upload Total Spent Left Speed
    logger.go:42: 14:48:08 | monitoring/3-rotate-pmm-key | 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 167 100 127 100 40 263 83 --:--:-- --:--:-- --:--:-- 347
    logger.go:42: 14:48:08 | monitoring/3-rotate-pmm-key | + API_KEY_NEW='"eyJrIjoiN05IRVd1cmVSUll0clgzeDJmNWc4RGVySE1MS2hiQnYiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9"'
    logger.go:42: 14:48:08 | monitoring/3-rotate-pmm-key | + kubectl patch -n kuttl-test-concrete-adder secret test-secrets --type merge --patch '{"stringData": {"pmmserverkey": "eyJrIjoiN05IRVd1cmVSUll0clgzeDJmNWc4RGVySE1MS2hiQnYiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9"}}'
    logger.go:42: 14:48:08 | monitoring/3-rotate-pmm-key | secret/test-secrets patched
    logger.go:42: 14:48:08 | monitoring/3-rotate-pmm-key | + delete_pmm_api_key operator
    logger.go:42: 14:48:08 | monitoring/3-rotate-pmm-key | + local key_name=operator
    logger.go:42: 14:48:08 | monitoring/3-rotate-pmm-key | + [[ -z operator ]]
    logger.go:42: 14:48:08 | monitoring/3-rotate-pmm-key | + local ADMIN_PASSWORD
    logger.go:42: 14:48:08 | monitoring/3-rotate-pmm-key | ++ kubectl -n kuttl-test-concrete-adder exec monitoring-0 -- bash -c 'printenv | grep ADMIN_PASSWORD | cut -d '\''='\'' -f2'
    logger.go:42: 14:48:10 | monitoring/3-rotate-pmm-key | + ADMIN_PASSWORD=admin
    logger.go:42: 14:48:10 | monitoring/3-rotate-pmm-key | + local key_id
    logger.go:42: 14:48:10 | monitoring/3-rotate-pmm-key | ++ jq '.[] | select( .name == "operator").id'
    logger.go:42: 14:48:10 | monitoring/3-rotate-pmm-key | +++ get_service_ip monitoring-service
    logger.go:42: 14:48:10 | monitoring/3-rotate-pmm-key | +++ local service=monitoring-service
    logger.go:42: 14:48:10 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-concrete-adder -o 'jsonpath={.spec.type}'
    logger.go:42: 14:48:10 | monitoring/3-rotate-pmm-key | +++ grep -q NotFound
    logger.go:42: 14:48:10 | monitoring/3-rotate-pmm-key | ++++ kubectl get service/monitoring-service -n kuttl-test-concrete-adder -o 'jsonpath={.spec.type}'
    logger.go:42: 14:48:10 | monitoring/3-rotate-pmm-key | +++ '[' LoadBalancer = ClusterIP ']'
    logger.go:42: 14:48:10 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-concrete-adder -o 'jsonpath={.status.loadBalancer.ingress[]}'
    logger.go:42: 14:48:10 | monitoring/3-rotate-pmm-key | +++ egrep -q 'hostname|ip'
    logger.go:42: 14:48:11 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-concrete-adder -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
    logger.go:42: 14:48:11 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-concrete-adder -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
    logger.go:42: 14:48:11 | monitoring/3-rotate-pmm-key | ++ curl --insecure -X GET https://admin:admin@34.123.89.122/graph/api/auth/keys
    logger.go:42: 14:48:11 | monitoring/3-rotate-pmm-key | % Total % Received % Xferd Average Speed Time Time Time Current
    logger.go:42: 14:48:11 | monitoring/3-rotate-pmm-key | Dload Upload Total Spent Left Speed
    logger.go:42: 14:48:12 | monitoring/3-rotate-pmm-key | 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 89 100 89 0 0 187 0 --:--:-- --:--:-- --:--:-- 187
    logger.go:42: 14:48:12 | monitoring/3-rotate-pmm-key | + key_id=1
    logger.go:42: 14:48:12 | monitoring/3-rotate-pmm-key | ++ get_service_ip monitoring-service
    logger.go:42: 14:48:12 | monitoring/3-rotate-pmm-key | ++ local service=monitoring-service
    logger.go:42: 14:48:12 | monitoring/3-rotate-pmm-key | ++ kubectl get service/monitoring-service -n kuttl-test-concrete-adder -o 'jsonpath={.spec.type}'
    logger.go:42: 14:48:12 | monitoring/3-rotate-pmm-key | ++ grep -q NotFound
    logger.go:42: 14:48:12 | monitoring/3-rotate-pmm-key | +++ kubectl get service/monitoring-service -n kuttl-test-concrete-adder -o 'jsonpath={.spec.type}'
    logger.go:42: 14:48:13 | monitoring/3-rotate-pmm-key | ++ '[' LoadBalancer = ClusterIP ']'
    logger.go:42: 14:48:13 | monitoring/3-rotate-pmm-key | ++ kubectl get service/monitoring-service -n kuttl-test-concrete-adder -o 'jsonpath={.status.loadBalancer.ingress[]}'
    logger.go:42: 14:48:13 | monitoring/3-rotate-pmm-key | ++ egrep -q 'hostname|ip'
    logger.go:42: 14:48:13 | monitoring/3-rotate-pmm-key | ++ kubectl get service/monitoring-service -n kuttl-test-concrete-adder -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
    logger.go:42: 14:48:13 | monitoring/3-rotate-pmm-key | ++ kubectl get service/monitoring-service -n kuttl-test-concrete-adder -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
    logger.go:42: 14:48:14 | monitoring/3-rotate-pmm-key | + curl --insecure -X DELETE https://admin:admin@34.123.89.122/graph/api/auth/keys/1
    logger.go:42: 14:48:14 | monitoring/3-rotate-pmm-key | % Total % Received % Xferd Average Speed Time Time Time Current
    logger.go:42: 14:48:14 | monitoring/3-rotate-pmm-key | Dload Upload Total Spent Left Speed
    logger.go:42: 14:48:14 | monitoring/3-rotate-pmm-key | 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 29 100 29 0 0 63 0 --:--:-- --:--:-- --:--:-- 63
    logger.go:42: 14:48:14 | monitoring/3-rotate-pmm-key | {"message":"API key deleted"}+ sleep 10
    logger.go:42: 14:52:18 | monitoring/3-rotate-pmm-key | test step completed 3-rotate-pmm-key
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | starting test step 4-check-metrics
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | running command: [sh -c set -o errexit set -o xtrace source ../../functions sleep 70 # we should wait more than one minute because `get_metric_values` gets data for the last 60 seconds API_KEY=$(kubectl get secret internal-monitoring -o jsonpath='{.data.pmmserverkey}' -n "${NAMESPACE}" | base64 --decode) for i in $(seq 0 2); do get_metric_values node_boot_time_seconds ${NAMESPACE}-$(get_cluster_name)-mysql-${i} api_key:$API_KEY get_metric_values mysql_global_status_uptime ${NAMESPACE}-$(get_cluster_name)-mysql-${i} api_key:$API_KEY done sleep 90 # wait for QAN get_qan20_values monitoring-mysql-0 api_key:$API_KEY haproxy_svc=$(get_service_ip "monitoring-haproxy") http_code=$(curl -s -o /dev/null -w "%{http_code}" http://${haproxy_svc}:8404/metrics) if [[ $http_code != 200 ]]; then echo "Error: http code is $http_code" exit 1 fi]
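Step 3's delete_pmm_api_key, traced just above, is the inverse of get_pmm_api_key: it lists the existing keys, extracts the numeric id of the named one with jq, and issues a DELETE. As a consolidated reconstruction (function structure assumed; the log's jq filter hardcodes "operator", so the parameterization shown here is an assumption):

    delete_pmm_api_key() {
        local key_name=${1:-operator}
        local ADMIN_PASSWORD
        ADMIN_PASSWORD=$(kubectl -n "${NAMESPACE}" exec monitoring-0 -- \
            bash -c 'printenv | grep ADMIN_PASSWORD | cut -d = -f2')
        local key_id
        # look up the id of the key to remove ...
        key_id=$(curl --insecure -X GET \
            "https://admin:${ADMIN_PASSWORD}@$(get_service_ip monitoring-service)/graph/api/auth/keys" \
            | jq '.[] | select( .name == "'"${key_name}"'").id')
        # ... and delete it
        curl --insecure -X DELETE \
            "https://admin:${ADMIN_PASSWORD}@$(get_service_ip monitoring-service)/graph/api/auth/keys/${key_id}"
    }

In this run the old "operator" key had id 1, hence the DELETE against /graph/api/auth/keys/1 and the {"message":"API key deleted"} response.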
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | + source ../../functions
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ realpath ../../..
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | ++++ pwd
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/tests/monitoring
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | ++ test_name=monitoring
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/vars.sh
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ export GIT_BRANCH=PR-523
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ GIT_BRANCH=PR-523
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ export VERSION=PR-523-f00253e
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ VERSION=PR-523-f00253e
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ export PMM_SERVER_VERSION=9.9.9
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ PMM_SERVER_VERSION=9.9.9
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | ++++ which gdate
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-523/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | ++++ which date
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ date=/usr/bin/date
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ command -v oc
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ kubectl get nodes
    logger.go:42: 14:52:18 | monitoring/4-check-metrics | +++ grep '^minikube'
    logger.go:42: 14:52:19 | monitoring/4-check-metrics | + sleep 70
    logger.go:42: 14:53:29 | monitoring/4-check-metrics | ++ kubectl get secret internal-monitoring -o 'jsonpath={.data.pmmserverkey}' -n kuttl-test-concrete-adder
    logger.go:42: 14:53:29 | monitoring/4-check-metrics | ++ base64 --decode
    logger.go:42: 14:53:29 | monitoring/4-check-metrics | + API_KEY=eyJrIjoiN05IRVd1cmVSUll0clgzeDJmNWc4RGVySE1MS2hiQnYiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9
    logger.go:42: 14:53:29 | monitoring/4-check-metrics | ++ seq 0 2
    logger.go:42: 14:53:29 | monitoring/4-check-metrics | + for i in '$(seq 0 2)'
    logger.go:42: 14:53:29 | monitoring/4-check-metrics | ++ get_cluster_name
    logger.go:42: 14:53:29 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-concrete-adder get ps -o 'jsonpath={.items[0].metadata.name}'
    logger.go:42: 14:53:30 | monitoring/4-check-metrics | + get_metric_values node_boot_time_seconds kuttl-test-concrete-adder-monitoring-mysql-0 api_key:eyJrIjoiN05IRVd1cmVSUll0clgzeDJmNWc4RGVySE1MS2hiQnYiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9
    logger.go:42: 14:53:30 | monitoring/4-check-metrics | + local metric=node_boot_time_seconds
    logger.go:42: 14:53:30 | monitoring/4-check-metrics | + local instance=kuttl-test-concrete-adder-monitoring-mysql-0
    logger.go:42: 14:53:30 | monitoring/4-check-metrics | + local user_pass=api_key:eyJrIjoiN05IRVd1cmVSUll0clgzeDJmNWc4RGVySE1MS2hiQnYiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9
    logger.go:42: 14:53:30 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute'
    logger.go:42: 14:53:30 | monitoring/4-check-metrics | + local start=1707144750
    logger.go:42: 14:53:30 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s
local end=1707144810 logger.go:42: 14:53:30 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 14:53:31 | monitoring/4-check-metrics | "1707141218" logger.go:42: 14:53:31 | monitoring/4-check-metrics | "1707141218" logger.go:42: 14:53:31 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 14:53:31 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-concrete-adder get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 14:53:31 | monitoring/4-check-metrics | + get_metric_values mysql_global_status_uptime kuttl-test-concrete-adder-monitoring-mysql-0 api_key:eyJrIjoiN05IRVd1cmVSUll0clgzeDJmNWc4RGVySE1MS2hiQnYiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 14:53:31 | monitoring/4-check-metrics | + local metric=mysql_global_status_uptime logger.go:42: 14:53:31 | monitoring/4-check-metrics | + local instance=kuttl-test-concrete-adder-monitoring-mysql-0 logger.go:42: 14:53:31 | monitoring/4-check-metrics | + local user_pass=api_key:eyJrIjoiN05IRVd1cmVSUll0clgzeDJmNWc4RGVySE1MS2hiQnYiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 14:53:31 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute' logger.go:42: 14:53:31 | monitoring/4-check-metrics | + local start=1707144751 logger.go:42: 14:53:31 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s logger.go:42: 14:53:31 | monitoring/4-check-metrics | + local end=1707144811 logger.go:42: 14:53:31 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 14:53:33 | monitoring/4-check-metrics | "189" logger.go:42: 14:53:33 | monitoring/4-check-metrics | "114" logger.go:42: 14:53:33 | monitoring/4-check-metrics | + for i in '$(seq 0 2)' logger.go:42: 14:53:33 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 14:53:33 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-concrete-adder get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 14:53:33 | monitoring/4-check-metrics | + get_metric_values node_boot_time_seconds kuttl-test-concrete-adder-monitoring-mysql-1 api_key:eyJrIjoiN05IRVd1cmVSUll0clgzeDJmNWc4RGVySE1MS2hiQnYiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 14:53:33 | monitoring/4-check-metrics | + local metric=node_boot_time_seconds logger.go:42: 14:53:33 | monitoring/4-check-metrics | + local instance=kuttl-test-concrete-adder-monitoring-mysql-1 logger.go:42: 14:53:33 | monitoring/4-check-metrics | + local user_pass=api_key:eyJrIjoiN05IRVd1cmVSUll0clgzeDJmNWc4RGVySE1MS2hiQnYiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 14:53:33 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute' logger.go:42: 14:53:33 | monitoring/4-check-metrics | + local start=1707144753 logger.go:42: 14:53:33 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s logger.go:42: 14:53:33 | monitoring/4-check-metrics | + local end=1707144813 logger.go:42: 14:53:33 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 14:53:34 | monitoring/4-check-metrics | "1707141221" logger.go:42: 14:53:34 | monitoring/4-check-metrics | "1707141221" logger.go:42: 14:53:34 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 14:53:34 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-concrete-adder get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 14:53:35 | monitoring/4-check-metrics | + get_metric_values mysql_global_status_uptime kuttl-test-concrete-adder-monitoring-mysql-1 api_key:eyJrIjoiN05IRVd1cmVSUll0clgzeDJmNWc4RGVySE1MS2hiQnYiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 14:53:35 | monitoring/4-check-metrics | + local 
metric=mysql_global_status_uptime logger.go:42: 14:53:35 | monitoring/4-check-metrics | + local instance=kuttl-test-concrete-adder-monitoring-mysql-1 logger.go:42: 14:53:35 | monitoring/4-check-metrics | + local user_pass=api_key:eyJrIjoiN05IRVd1cmVSUll0clgzeDJmNWc4RGVySE1MS2hiQnYiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 14:53:35 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute' logger.go:42: 14:53:35 | monitoring/4-check-metrics | + local start=1707144755 logger.go:42: 14:53:35 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s logger.go:42: 14:53:35 | monitoring/4-check-metrics | + local end=1707144815 logger.go:42: 14:53:35 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 14:53:36 | monitoring/4-check-metrics | "117" logger.go:42: 14:53:36 | monitoring/4-check-metrics | "279" logger.go:42: 14:53:36 | monitoring/4-check-metrics | + for i in '$(seq 0 2)' logger.go:42: 14:53:36 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 14:53:36 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-concrete-adder get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 14:53:36 | monitoring/4-check-metrics | + get_metric_values node_boot_time_seconds kuttl-test-concrete-adder-monitoring-mysql-2 api_key:eyJrIjoiN05IRVd1cmVSUll0clgzeDJmNWc4RGVySE1MS2hiQnYiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 14:53:36 | monitoring/4-check-metrics | + local metric=node_boot_time_seconds logger.go:42: 14:53:36 | monitoring/4-check-metrics | + local instance=kuttl-test-concrete-adder-monitoring-mysql-2 logger.go:42: 14:53:36 | monitoring/4-check-metrics | + local user_pass=api_key:eyJrIjoiN05IRVd1cmVSUll0clgzeDJmNWc4RGVySE1MS2hiQnYiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 14:53:36 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute' logger.go:42: 14:53:36 | monitoring/4-check-metrics | + local start=1707144756 logger.go:42: 14:53:36 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s logger.go:42: 14:53:36 | monitoring/4-check-metrics | + local end=1707144816 logger.go:42: 14:53:36 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 14:53:38 | monitoring/4-check-metrics | "1707141217" logger.go:42: 14:53:38 | monitoring/4-check-metrics | "1707141217" logger.go:42: 14:53:38 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 14:53:38 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-concrete-adder get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 14:53:38 | monitoring/4-check-metrics | + get_metric_values mysql_global_status_uptime kuttl-test-concrete-adder-monitoring-mysql-2 api_key:eyJrIjoiN05IRVd1cmVSUll0clgzeDJmNWc4RGVySE1MS2hiQnYiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 14:53:38 | monitoring/4-check-metrics | + local metric=mysql_global_status_uptime logger.go:42: 14:53:38 | monitoring/4-check-metrics | + local instance=kuttl-test-concrete-adder-monitoring-mysql-2 logger.go:42: 14:53:38 | monitoring/4-check-metrics | + local user_pass=api_key:eyJrIjoiN05IRVd1cmVSUll0clgzeDJmNWc4RGVySE1MS2hiQnYiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 14:53:38 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute' logger.go:42: 14:53:38 | monitoring/4-check-metrics | + local start=1707144758 logger.go:42: 14:53:38 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s logger.go:42: 14:53:38 | monitoring/4-check-metrics | + local end=1707144818 logger.go:42: 14:53:38 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 14:53:39 | 
monitoring/4-check-metrics | "48" logger.go:42: 14:53:39 | monitoring/4-check-metrics | "208" logger.go:42: 14:53:39 | monitoring/4-check-metrics | + sleep 90 logger.go:42: 14:55:09 | monitoring/4-check-metrics | + get_qan20_values monitoring-mysql-0 api_key:eyJrIjoiN05IRVd1cmVSUll0clgzeDJmNWc4RGVySE1MS2hiQnYiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 14:55:09 | monitoring/4-check-metrics | + local instance=monitoring-mysql-0 logger.go:42: 14:55:09 | monitoring/4-check-metrics | + local user_pass=api_key:eyJrIjoiN05IRVd1cmVSUll0clgzeDJmNWc4RGVySE1MS2hiQnYiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9 logger.go:42: 14:55:09 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S -d '-30 minute' logger.go:42: 14:55:09 | monitoring/4-check-metrics | + local start=2024-02-05T14:25:09 logger.go:42: 14:55:09 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S logger.go:42: 14:55:09 | monitoring/4-check-metrics | + local end=2024-02-05T14:55:09 logger.go:42: 14:55:09 | monitoring/4-check-metrics | + local endpoint=monitoring-service logger.go:42: 14:55:09 | monitoring/4-check-metrics | ++ cat logger.go:42: 14:55:09 | monitoring/4-check-metrics | +++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z logger.go:42: 14:55:09 | monitoring/4-check-metrics | +++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z logger.go:42: 14:55:09 | monitoring/4-check-metrics | + local 'payload={ logger.go:42: 14:55:09 | monitoring/4-check-metrics | "columns":[ logger.go:42: 14:55:09 | monitoring/4-check-metrics | "load", logger.go:42: 14:55:09 | monitoring/4-check-metrics | "num_queries", logger.go:42: 14:55:09 | monitoring/4-check-metrics | "query_time" logger.go:42: 14:55:09 | monitoring/4-check-metrics | ], logger.go:42: 14:55:09 | monitoring/4-check-metrics | "first_seen": false, logger.go:42: 14:55:09 | monitoring/4-check-metrics | "group_by": "queryid", logger.go:42: 14:55:09 | monitoring/4-check-metrics | "include_only_fields": [], logger.go:42: 14:55:09 | monitoring/4-check-metrics | "keyword": "", logger.go:42: 14:55:09 | monitoring/4-check-metrics | "labels": [ logger.go:42: 14:55:09 | monitoring/4-check-metrics | { logger.go:42: 14:55:09 | monitoring/4-check-metrics | "key": "cluster", logger.go:42: 14:55:09 | monitoring/4-check-metrics | "value": ["monitoring"] logger.go:42: 14:55:09 | monitoring/4-check-metrics | }], logger.go:42: 14:55:09 | monitoring/4-check-metrics | "limit": 10, logger.go:42: 14:55:09 | monitoring/4-check-metrics | "offset": 0, logger.go:42: 14:55:09 | monitoring/4-check-metrics | "order_by": "-load", logger.go:42: 14:55:09 | monitoring/4-check-metrics | "main_metric": "load", logger.go:42: 14:55:09 | monitoring/4-check-metrics | "period_start_from": "2024-02-05T02:55:09+00:00", logger.go:42: 14:55:09 | monitoring/4-check-metrics | "period_start_to": "2024-02-05T14:55:09+00:00" logger.go:42: 14:55:09 | monitoring/4-check-metrics | }' logger.go:42: 14:55:09 | monitoring/4-check-metrics | + jq '.rows[].fingerprint' logger.go:42: 14:55:09 | monitoring/4-check-metrics | ++ sed 's/\n//g' logger.go:42: 14:55:09 | monitoring/4-check-metrics | ++ echo '{' '"columns":[' '"load",' '"num_queries",' '"query_time"' '],' '"first_seen":' false, '"group_by":' '"queryid",' '"include_only_fields":' '[],' '"keyword":' '"",' '"labels":' '[' '{' '"key":' '"cluster",' '"value":' '["monitoring"]' '}],' '"limit":' 10, '"offset":' 0, '"order_by":' '"-load",' '"main_metric":' '"load",' '"period_start_from":' '"2024-02-05T02:55:09+00:00",' '"period_start_to":' 
'"2024-02-05T14:55:09+00:00"' '}' logger.go:42: 14:55:09 | monitoring/4-check-metrics | + run_curl -XPOST -d ''\''{ "columns":[ "load", "num_queries", "query_time" ], "first_seen": false, "group_by": "queryid", "include_only_fields": [], "keyword": "", "labels": [ { "key": "cluster", "value": ["monitoring"] }], "limit": 10, "offset": 0, "order_by": "-load", "main_metric": "load", "period_start_from": "2024-02-05T02:55:09+00:00", "period_start_to": "2024-02-05T14:55:09+00:00" }'\''' https://api_key:eyJrIjoiN05IRVd1cmVSUll0clgzeDJmNWc4RGVySE1MS2hiQnYiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9@monitoring-service/v0/qan/GetReport logger.go:42: 14:55:09 | monitoring/4-check-metrics | + kubectl -n kuttl-test-concrete-adder exec mysql-client -- bash -c 'curl -s -k -XPOST -d '\''{ "columns":[ "load", "num_queries", "query_time" ], "first_seen": false, "group_by": "queryid", "include_only_fields": [], "keyword": "", "labels": [ { "key": "cluster", "value": ["monitoring"] }], "limit": 10, "offset": 0, "order_by": "-load", "main_metric": "load", "period_start_from": "2024-02-05T02:55:09+00:00", "period_start_to": "2024-02-05T14:55:09+00:00" }'\'' https://api_key:eyJrIjoiN05IRVd1cmVSUll0clgzeDJmNWc4RGVySE1MS2hiQnYiLCJuIjoib3BlcmF0b3ItbmV3IiwiaWQiOjF9@monitoring-service/v0/qan/GetReport' logger.go:42: 14:55:11 | monitoring/4-check-metrics | "TOTAL" logger.go:42: 14:55:11 | monitoring/4-check-metrics | "SELECT `EVENT_NAME` , `COUNT_STAR` , `SUM_TIMER_WAIT` FROM `performance_schema` . `events_waits_summary_global_by_event_name`" logger.go:42: 14:55:11 | monitoring/4-check-metrics | "SHOW GLOBAL STATUS" logger.go:42: 14:55:11 | monitoring/4-check-metrics | "REPLACE INTO `sys_operator` . `heartbeat` ( `ts` , `server_id` , FILE , `position` , `relay_master_log_file` , `exec_master_log_pos` ) VALUES (...)" logger.go:42: 14:55:11 | monitoring/4-check-metrics | "SHOW GLOBAL VARIABLES LIKE ?" logger.go:42: 14:55:11 | monitoring/4-check-metrics | "SELECT COLUMN_NAME FROM `information_schema` . `columns` WHERE `table_schema` = ? AND TABLE_NAME = ? AND COLUMN_NAME IN (...) LIMIT ?" logger.go:42: 14:55:11 | monitoring/4-check-metrics | "SHOW GLOBAL STATUS LIKE ?" logger.go:42: 14:55:11 | monitoring/4-check-metrics | "SELECT NAME , `subsystem` , TYPE , COMMENT , `count` FROM `information_schema` . `innodb_metrics` WHERE `status` = ?" logger.go:42: 14:55:11 | monitoring/4-check-metrics | "SELECT `t` . `table_schema` , `t` . `table_name` , COLUMN_NAME , AUTO_INCREMENT , `pow` ( ? , CASE `data_type` WHEN ? THEN ? WHEN ? THEN ? WHEN ? THEN ? WHEN ? THEN ? WHEN ? THEN ? END + ( `column_type` LIKE ? ) ) - ? AS `max_int` FROM `information_schema` . `columns` `c` STRAIGHT_JOIN `information_schema` . `tables` `t` ON BINARY `t` . `table_schema` = `c` . `table_schema` AND BINARY `t` . `table_name` = `c` . `table_name` WHERE `c` . `extra` = ? AND `t` . `auto_increment` IS NOT NULL" logger.go:42: 14:55:11 | monitoring/4-check-metrics | "SELECT `conn_status` . `channel_name` AS `channel_name` , `conn_status` . `service_state` AS RELAY_THREAD , `applier_status` . `service_state` AS SQL_THREAD , `LAST_APPLIED_TRANSACTION_END_APPLY_TIMESTAMP` - `LAST_APPLIED_TRANSACTION_ORIGINAL_COMMIT_TIMESTAMP` ? , `LAST_QUEUED_TRANSACTION_START_QUEUE_TIMESTAMP` - `LAST_QUEUED_TRANSACTION_ORIGINAL_COMMIT_TIMESTAMP` ? , `LAST_QUEUED_TRANSACTION_END_QUEUE_TIMESTAMP` - `LAST_QUEUED_TRANSACTION_START_QUEUE_TIMESTAMP` ? , `LAST_APPLIED_TRANSACTION_END_APPLY_TIMESTAMP` - `LAST_APPLIED_TRANSACTION_START_APPLY_TIMESTAMP` ? 
, IF ( `GTID_SUBTRACT` ( `LAST_QUEUED_TRANSACTION` , `LAST_APPLIED_TRANSACTION` ) = ?, ... , `abs` ( `time_to_sec` ( IF ( `time_to_sec` ( `APPLYING_TRANSACTION_ORIGINAL_COMMIT_TIMESTAMP` ) = ?, ... , `timediff` ( `APPLYING_TRANSACTION_ORIGINAL_COMMIT_TIMESTAMP` , NOW ( ) ) ) ) ) ) `lag_in_seconds` FROM `performance_schema` . `replication_connection_status` AS `conn_status` JOIN `performance_schema` . `replication_applier_status_by_worker` AS `applier_status` ON" logger.go:42: 14:55:11 | monitoring/4-check-metrics | "SELECT `performance_schema` . `events_statements_history` . `SQL_TEXT` , `performance_schema` . `events_statements_history` . `DIGEST` , `performance_schema` . `events_statements_history` . `DIGEST_TEXT` , `performance_schema` . `events_statements_history` . `CURRENT_SCHEMA` FROM `performance_schema` . `events_statements_history` WHERE `DIGEST` IS NOT NULL AND `SQL_TEXT` IS NOT NULL" logger.go:42: 14:55:11 | monitoring/4-check-metrics | ++ get_service_ip monitoring-haproxy logger.go:42: 14:55:11 | monitoring/4-check-metrics | ++ local service=monitoring-haproxy logger.go:42: 14:55:11 | monitoring/4-check-metrics | ++ kubectl get service/monitoring-haproxy -n kuttl-test-concrete-adder -o 'jsonpath={.spec.type}' logger.go:42: 14:55:11 | monitoring/4-check-metrics | ++ grep -q NotFound logger.go:42: 14:55:11 | monitoring/4-check-metrics | +++ kubectl get service/monitoring-haproxy -n kuttl-test-concrete-adder -o 'jsonpath={.spec.type}' logger.go:42: 14:55:12 | monitoring/4-check-metrics | ++ '[' LoadBalancer = ClusterIP ']' logger.go:42: 14:55:12 | monitoring/4-check-metrics | ++ kubectl get service/monitoring-haproxy -n kuttl-test-concrete-adder -o 'jsonpath={.status.loadBalancer.ingress[]}' logger.go:42: 14:55:12 | monitoring/4-check-metrics | ++ egrep -q 'hostname|ip' logger.go:42: 14:55:12 | monitoring/4-check-metrics | ++ kubectl get service/monitoring-haproxy -n kuttl-test-concrete-adder -o 'jsonpath={.status.loadBalancer.ingress[].ip}' logger.go:42: 14:55:13 | monitoring/4-check-metrics | ++ kubectl get service/monitoring-haproxy -n kuttl-test-concrete-adder -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' logger.go:42: 14:55:13 | monitoring/4-check-metrics | + haproxy_svc=34.71.49.195 logger.go:42: 14:55:13 | monitoring/4-check-metrics | ++ curl -s -o /dev/null -w '%{http_code}' http://34.71.49.195:8404/metrics logger.go:42: 14:55:14 | monitoring/4-check-metrics | + http_code=200 logger.go:42: 14:55:14 | monitoring/4-check-metrics | + [[ 200 != 200 ]] logger.go:42: 14:55:15 | monitoring/4-check-metrics | test step completed 4-check-metrics logger.go:42: 14:55:15 | monitoring/5-check-password-leak | starting test step 5-check-password-leak logger.go:42: 14:55:15 | monitoring/5-check-password-leak | running command: [sh -c set -o errexit set -o xtrace source ../../functions check_passwords_leak] logger.go:42: 14:55:15 | monitoring/5-check-password-leak | + source ../../functions logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ realpath ../../.. 
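Condensed from the xtrace above, the 4-check-metrics QAN probe amounts to roughly the following sketch. The variable names NAMESPACE, START, END and PAYLOAD are placeholders of mine, and the payload is trimmed to the fields visible in the log; treat this as a reconstruction of the trace, not the verbatim e2e-tests/functions code.

    NAMESPACE=kuttl-test-concrete-adder   # namespace used by this run
    # The operator stores a PMM server API key in the internal-monitoring secret.
    API_KEY=$(kubectl -n "$NAMESPACE" get secret internal-monitoring \
        -o 'jsonpath={.data.pmmserverkey}' | base64 --decode)
    # QAN is queried over the last 12 hours, as the date calls in the trace show.
    START=$(/usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z)
    END=$(/usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z)
    PAYLOAD="{\"columns\":[\"load\",\"num_queries\",\"query_time\"],
      \"group_by\":\"queryid\",
      \"labels\":[{\"key\":\"cluster\",\"value\":[\"monitoring\"]}],
      \"limit\":10,\"order_by\":\"-load\",\"main_metric\":\"load\",
      \"period_start_from\":\"$START\",\"period_start_to\":\"$END\"}"
    # curl runs inside the mysql-client pod because monitoring-service is only
    # resolvable in-cluster; the API key doubles as the basic-auth user "api_key".
    kubectl -n "$NAMESPACE" exec mysql-client -- \
        curl -s -k -XPOST -d "$PAYLOAD" \
        "https://api_key:$API_KEY@monitoring-service/v0/qan/GetReport" \
      | jq '.rows[].fingerprint'

The fingerprints printed in the trace (SHOW GLOBAL STATUS, the pt-heartbeat REPLACE, the replication-lag query, and so on) are what such a call returns once the exporters have been reporting for a while.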
logger.go:42: 14:55:15 | monitoring/5-check-password-leak | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:55:15 | monitoring/5-check-password-leak | ++++ pwd logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/tests/monitoring logger.go:42: 14:55:15 | monitoring/5-check-password-leak | ++ test_name=monitoring logger.go:42: 14:55:15 | monitoring/5-check-password-leak | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/vars.sh logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 14:55:15 | monitoring/5-check-password-leak | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ export GIT_BRANCH=PR-523 logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ GIT_BRANCH=PR-523 logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ export VERSION=PR-523-f00253e logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ VERSION=PR-523-f00253e logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:55:15 | 
monitoring/5-check-password-leak | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:55:15 | monitoring/5-check-password-leak | ++++ which gdate logger.go:42: 14:55:15 | monitoring/5-check-password-leak | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-523/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 14:55:15 | monitoring/5-check-password-leak | ++++ which date logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ date=/usr/bin/date logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ command -v oc logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ kubectl get nodes logger.go:42: 14:55:15 | monitoring/5-check-password-leak | +++ grep '^minikube' logger.go:42: 14:55:16 | monitoring/5-check-password-leak | + check_passwords_leak logger.go:42: 14:55:16 | monitoring/5-check-password-leak | + local secrets logger.go:42: 14:55:16 | monitoring/5-check-password-leak | + local passwords logger.go:42: 14:55:16 | monitoring/5-check-password-leak | + local pods logger.go:42: 14:55:16 | monitoring/5-check-password-leak | ++ kubectl get secrets -o json logger.go:42: 14:55:16 | monitoring/5-check-password-leak | ++ jq -r '.items[].data | to_entries | .[] | select(.key | (endswith(".crt") or endswith(".key") or endswith(".pub") or endswith(".pem") or endswith(".p12") or test("namespace")) | not) | .value' logger.go:42: 14:55:16 | monitoring/5-check-password-leak | + secrets= logger.go:42: 14:55:16 | monitoring/5-check-password-leak | + passwords=' ' logger.go:42: 14:55:16 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-concrete-adder get pods -o name logger.go:42: 14:55:16 | monitoring/5-check-password-leak | ++ awk -F / '{print $2}' logger.go:42: 14:55:17 | monitoring/5-check-password-leak | + pods='monitoring-0 logger.go:42: 14:55:17 | monitoring/5-check-password-leak | monitoring-haproxy-0 logger.go:42: 14:55:17 | monitoring/5-check-password-leak | monitoring-haproxy-1 logger.go:42: 14:55:17 | monitoring/5-check-password-leak | monitoring-haproxy-2 logger.go:42: 14:55:17 | monitoring/5-check-password-leak | monitoring-mysql-0 logger.go:42: 14:55:17 | monitoring/5-check-password-leak | monitoring-mysql-1 logger.go:42: 14:55:17 | monitoring/5-check-password-leak | monitoring-mysql-2 logger.go:42: 14:55:17 | 
monitoring/5-check-password-leak | monitoring-orc-0 logger.go:42: 14:55:17 | monitoring/5-check-password-leak | monitoring-orc-1 logger.go:42: 14:55:17 | monitoring/5-check-password-leak | monitoring-orc-2 logger.go:42: 14:55:17 | monitoring/5-check-password-leak | mysql-client logger.go:42: 14:55:17 | monitoring/5-check-password-leak | percona-server-mysql-operator-b4c599bbb-drnc2' logger.go:42: 14:55:17 | monitoring/5-check-password-leak | + collect_logs kuttl-test-concrete-adder logger.go:42: 14:55:17 | monitoring/5-check-password-leak | + local containers logger.go:42: 14:55:17 | monitoring/5-check-password-leak | + local count logger.go:42: 14:55:17 | monitoring/5-check-password-leak | + NS=kuttl-test-concrete-adder logger.go:42: 14:55:17 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 14:55:17 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-concrete-adder get pod monitoring-0 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 14:55:17 | monitoring/5-check-password-leak | + containers=monitoring logger.go:42: 14:55:17 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:17 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-0 -c monitoring logger.go:42: 14:55:18 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-0-monitoring.txt logger.go:42: 14:55:18 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-0-monitoring.txt logger.go:42: 14:55:18 | monitoring/5-check-password-leak | + echo logger.go:42: 14:55:18 | monitoring/5-check-password-leak | logger.go:42: 14:55:18 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 14:55:18 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-concrete-adder get pod monitoring-haproxy-0 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 14:55:18 | monitoring/5-check-password-leak | + containers='haproxy mysql-monit pmm-client' logger.go:42: 14:55:18 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:18 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-haproxy-0 -c haproxy logger.go:42: 14:55:19 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-haproxy.txt logger.go:42: 14:55:19 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-haproxy.txt logger.go:42: 14:55:19 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:19 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-haproxy-0 -c mysql-monit logger.go:42: 14:55:20 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-mysql-monit.txt logger.go:42: 14:55:20 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-mysql-monit.txt logger.go:42: 14:55:20 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:20 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-haproxy-0 -c pmm-client logger.go:42: 14:55:20 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-pmm-client.txt logger.go:42: 14:55:20 | monitoring/5-check-password-leak | logs saved in: 
/tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-pmm-client.txt logger.go:42: 14:55:20 | monitoring/5-check-password-leak | + echo logger.go:42: 14:55:20 | monitoring/5-check-password-leak | logger.go:42: 14:55:20 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 14:55:20 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-concrete-adder get pod monitoring-haproxy-1 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 14:55:21 | monitoring/5-check-password-leak | + containers='haproxy mysql-monit pmm-client' logger.go:42: 14:55:21 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:21 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-haproxy-1 -c haproxy logger.go:42: 14:55:22 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-haproxy.txt logger.go:42: 14:55:22 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-haproxy.txt logger.go:42: 14:55:22 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:22 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-haproxy-1 -c mysql-monit logger.go:42: 14:55:23 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-mysql-monit.txt logger.go:42: 14:55:23 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-mysql-monit.txt logger.go:42: 14:55:23 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:23 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-haproxy-1 -c pmm-client logger.go:42: 14:55:23 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-pmm-client.txt logger.go:42: 14:55:23 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-pmm-client.txt logger.go:42: 14:55:23 | monitoring/5-check-password-leak | + echo logger.go:42: 14:55:23 | monitoring/5-check-password-leak | logger.go:42: 14:55:23 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 14:55:23 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-concrete-adder get pod monitoring-haproxy-2 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 14:55:24 | monitoring/5-check-password-leak | + containers='haproxy mysql-monit pmm-client' logger.go:42: 14:55:24 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:24 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-haproxy-2 -c haproxy logger.go:42: 14:55:25 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-haproxy.txt logger.go:42: 14:55:25 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-haproxy.txt logger.go:42: 14:55:25 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:25 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-haproxy-2 -c mysql-monit logger.go:42: 14:55:26 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-mysql-monit.txt logger.go:42: 14:55:26 | 
monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-mysql-monit.txt logger.go:42: 14:55:26 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:26 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-haproxy-2 -c pmm-client logger.go:42: 14:55:26 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-pmm-client.txt logger.go:42: 14:55:26 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-pmm-client.txt logger.go:42: 14:55:26 | monitoring/5-check-password-leak | + echo logger.go:42: 14:55:26 | monitoring/5-check-password-leak | logger.go:42: 14:55:26 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 14:55:26 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-concrete-adder get pod monitoring-mysql-0 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 14:55:27 | monitoring/5-check-password-leak | + containers='mysql xtrabackup pt-heartbeat pmm-client' logger.go:42: 14:55:27 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:27 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-mysql-0 -c mysql logger.go:42: 14:55:27 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-mysql.txt logger.go:42: 14:55:27 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-mysql.txt logger.go:42: 14:55:27 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:27 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-mysql-0 -c xtrabackup logger.go:42: 14:55:28 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-xtrabackup.txt logger.go:42: 14:55:28 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-xtrabackup.txt logger.go:42: 14:55:28 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:28 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-mysql-0 -c pt-heartbeat logger.go:42: 14:55:29 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-pt-heartbeat.txt logger.go:42: 14:55:29 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-pt-heartbeat.txt logger.go:42: 14:55:29 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:29 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-mysql-0 -c pmm-client logger.go:42: 14:55:29 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-pmm-client.txt logger.go:42: 14:55:29 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-pmm-client.txt logger.go:42: 14:55:29 | monitoring/5-check-password-leak | + echo logger.go:42: 14:55:29 | monitoring/5-check-password-leak | logger.go:42: 14:55:29 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 14:55:29 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-concrete-adder get pod monitoring-mysql-1 -o 
'jsonpath={.spec.containers[*].name}' logger.go:42: 14:55:30 | monitoring/5-check-password-leak | + containers='mysql xtrabackup pt-heartbeat pmm-client' logger.go:42: 14:55:30 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:30 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-mysql-1 -c mysql logger.go:42: 14:55:31 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-mysql.txt logger.go:42: 14:55:31 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-mysql.txt logger.go:42: 14:55:31 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:31 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-mysql-1 -c xtrabackup logger.go:42: 14:55:31 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-xtrabackup.txt logger.go:42: 14:55:31 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-xtrabackup.txt logger.go:42: 14:55:31 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:31 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-mysql-1 -c pt-heartbeat logger.go:42: 14:55:32 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-pt-heartbeat.txt logger.go:42: 14:55:32 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-pt-heartbeat.txt logger.go:42: 14:55:32 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:32 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-mysql-1 -c pmm-client logger.go:42: 14:55:32 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-pmm-client.txt logger.go:42: 14:55:32 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-pmm-client.txt logger.go:42: 14:55:32 | monitoring/5-check-password-leak | + echo logger.go:42: 14:55:32 | monitoring/5-check-password-leak | logger.go:42: 14:55:32 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 14:55:32 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-concrete-adder get pod monitoring-mysql-2 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 14:55:33 | monitoring/5-check-password-leak | + containers='mysql xtrabackup pt-heartbeat pmm-client' logger.go:42: 14:55:33 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:33 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-mysql-2 -c mysql logger.go:42: 14:55:34 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-mysql.txt logger.go:42: 14:55:34 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-mysql.txt logger.go:42: 14:55:34 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:34 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-mysql-2 -c xtrabackup logger.go:42: 14:55:34 | monitoring/5-check-password-leak | + echo logs saved in: 
/tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-xtrabackup.txt logger.go:42: 14:55:34 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-xtrabackup.txt logger.go:42: 14:55:34 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:34 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-mysql-2 -c pt-heartbeat logger.go:42: 14:55:35 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-pt-heartbeat.txt logger.go:42: 14:55:35 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-pt-heartbeat.txt logger.go:42: 14:55:35 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:35 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-mysql-2 -c pmm-client logger.go:42: 14:55:36 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-pmm-client.txt logger.go:42: 14:55:36 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-pmm-client.txt logger.go:42: 14:55:36 | monitoring/5-check-password-leak | + echo logger.go:42: 14:55:36 | monitoring/5-check-password-leak | logger.go:42: 14:55:36 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 14:55:36 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-concrete-adder get pod monitoring-orc-0 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 14:55:36 | monitoring/5-check-password-leak | + containers='orc mysql-monit' logger.go:42: 14:55:36 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:36 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-orc-0 -c orc logger.go:42: 14:55:37 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-0-orc.txt logger.go:42: 14:55:37 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-0-orc.txt logger.go:42: 14:55:37 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:37 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-orc-0 -c mysql-monit logger.go:42: 14:55:38 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-0-mysql-monit.txt logger.go:42: 14:55:38 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-0-mysql-monit.txt logger.go:42: 14:55:38 | monitoring/5-check-password-leak | + echo logger.go:42: 14:55:38 | monitoring/5-check-password-leak | logger.go:42: 14:55:38 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 14:55:38 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-concrete-adder get pod monitoring-orc-1 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 14:55:38 | monitoring/5-check-password-leak | + containers='orc mysql-monit' logger.go:42: 14:55:38 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:38 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-orc-1 -c orc logger.go:42: 14:55:39 | monitoring/5-check-password-leak | + echo logs saved in: 
/tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-1-orc.txt logger.go:42: 14:55:39 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-1-orc.txt logger.go:42: 14:55:39 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:39 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-orc-1 -c mysql-monit logger.go:42: 14:55:40 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-1-mysql-monit.txt logger.go:42: 14:55:40 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-1-mysql-monit.txt logger.go:42: 14:55:40 | monitoring/5-check-password-leak | + echo logger.go:42: 14:55:40 | monitoring/5-check-password-leak | logger.go:42: 14:55:40 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 14:55:40 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-concrete-adder get pod monitoring-orc-2 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 14:55:40 | monitoring/5-check-password-leak | + containers='orc mysql-monit' logger.go:42: 14:55:40 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:40 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-orc-2 -c orc logger.go:42: 14:55:41 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-2-orc.txt logger.go:42: 14:55:41 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-2-orc.txt logger.go:42: 14:55:41 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:41 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs monitoring-orc-2 -c mysql-monit logger.go:42: 14:55:42 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-2-mysql-monit.txt logger.go:42: 14:55:42 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-2-mysql-monit.txt logger.go:42: 14:55:42 | monitoring/5-check-password-leak | + echo logger.go:42: 14:55:42 | monitoring/5-check-password-leak | logger.go:42: 14:55:42 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 14:55:42 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-concrete-adder get pod mysql-client -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 14:55:42 | monitoring/5-check-password-leak | + containers=mysql-client logger.go:42: 14:55:42 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:42 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs mysql-client -c mysql-client logger.go:42: 14:55:43 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-mysql-client-mysql-client.txt logger.go:42: 14:55:43 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-mysql-client-mysql-client.txt logger.go:42: 14:55:43 | monitoring/5-check-password-leak | + echo logger.go:42: 14:55:43 | monitoring/5-check-password-leak | logger.go:42: 14:55:43 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 14:55:43 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-concrete-adder get pod percona-server-mysql-operator-b4c599bbb-drnc2 -o 
'jsonpath={.spec.containers[*].name}' logger.go:42: 14:55:43 | monitoring/5-check-password-leak | + containers=manager logger.go:42: 14:55:43 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 14:55:43 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-concrete-adder logs percona-server-mysql-operator-b4c599bbb-drnc2 -c manager logger.go:42: 14:55:44 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-percona-server-mysql-operator-b4c599bbb-drnc2-manager.txt logger.go:42: 14:55:44 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-percona-server-mysql-operator-b4c599bbb-drnc2-manager.txt logger.go:42: 14:55:44 | monitoring/5-check-password-leak | + echo logger.go:42: 14:55:44 | monitoring/5-check-password-leak | logger.go:42: 14:55:44 | monitoring/5-check-password-leak | + '[' -n '' ']' logger.go:42: 14:55:44 | monitoring/5-check-password-leak | test step completed 5-check-password-leak logger.go:42: 14:55:44 | monitoring/99-drop-finalizer | starting test step 99-drop-finalizer logger.go:42: 14:55:46 | monitoring/99-drop-finalizer | PerconaServerMySQL:kuttl-test-concrete-adder/monitoring updated logger.go:42: 14:55:46 | monitoring/99-drop-finalizer | test step completed 99-drop-finalizer logger.go:42: 14:55:46 | monitoring | monitoring events from ns kuttl-test-concrete-adder: logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:42:42 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-drnc2 Scheduled Successfully assigned kuttl-test-concrete-adder/percona-server-mysql-operator-b4c599bbb-drnc2 to gke-jen-ps-523-f00253e-4-default-pool-66e70372-8073 default-scheduler logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:42:42 +0000 UTC Normal ReplicaSet.apps percona-server-mysql-operator-b4c599bbb SuccessfulCreate Created pod: percona-server-mysql-operator-b4c599bbb-drnc2 replicaset-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:42:42 +0000 UTC Normal Deployment.apps percona-server-mysql-operator ScalingReplicaSet Scaled up replica set percona-server-mysql-operator-b4c599bbb to 1 deployment-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:42:44 +0000 UTC Normal Lease.coordination.k8s.io 08db2feb.percona.com LeaderElection percona-server-mysql-operator-b4c599bbb-drnc2_2ecdc778-5ace-447e-a3e2-fb0bb857ec6a became leader percona-server-mysql-operator-b4c599bbb-drnc2_2ecdc778-5ace-447e-a3e2-fb0bb857ec6a logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:42:44 +0000 UTC Normal Pod mysql-client Scheduled Successfully assigned kuttl-test-concrete-adder/mysql-client to gke-jen-ps-523-f00253e-4-default-pool-66e70372-8073 default-scheduler logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:42:44 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-drnc2.spec.containers{manager} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:42:44 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-drnc2.spec.containers{manager} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 110.208472ms (110.219294ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:42:44 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-drnc2.spec.containers{manager} Created Created container manager kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:42:44 +0000 UTC Normal Pod 
percona-server-mysql-operator-b4c599bbb-drnc2.spec.containers{manager} Started Started container manager kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:42:45 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Container image "percona/percona-server:8.0.33" already present on machine kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:42:46 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container mysql-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:42:46 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:42:56 +0000 UTC Normal Service monitoring-service EnsuringLoadBalancer Ensuring load balancer logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:42:56 +0000 UTC Normal StatefulSet.apps monitoring SuccessfulCreate create Claim pmmdata-monitoring-0 Pod monitoring-0 in StatefulSet monitoring success statefulset-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:42:56 +0000 UTC Normal StatefulSet.apps monitoring SuccessfulCreate create Pod monitoring-0 in StatefulSet monitoring successful statefulset-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:42:56 +0000 UTC Normal PersistentVolumeClaim pmmdata-monitoring-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:42:56 +0000 UTC Normal PersistentVolumeClaim pmmdata-monitoring-0 ExternalProvisioning waiting for a volume to be created, either by external provisioner "pd.csi.storage.gke.io" or manually created by system administrator persistentvolume-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:42:56 +0000 UTC Normal PersistentVolumeClaim pmmdata-monitoring-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-concrete-adder/pmmdata-monitoring-0" pd.csi.storage.gke.io_gke-691abb2cbdc24ee7a4c4-482d-f440-vm_34965163-c6db-441e-84a3-be5b143dbe0e logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:43:00 +0000 UTC Normal Pod monitoring-0 Scheduled Successfully assigned kuttl-test-concrete-adder/monitoring-0 to gke-jen-ps-523-f00253e-4-default-pool-66e70372-8073 default-scheduler logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:43:00 +0000 UTC Normal PersistentVolumeClaim pmmdata-monitoring-0 ProvisioningSucceeded Successfully provisioned volume pvc-3b5f4f71-568f-4912-88da-1a5b3673bae2 pd.csi.storage.gke.io_gke-691abb2cbdc24ee7a4c4-482d-f440-vm_34965163-c6db-441e-84a3-be5b143dbe0e logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:43:08 +0000 UTC Normal Pod monitoring-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-3b5f4f71-568f-4912-88da-1a5b3673bae2" attachdetach-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:43:09 +0000 UTC Normal Pod monitoring-0.spec.containers{monitoring} Pulling Pulling image "perconalab/pmm-server:dev-latest" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:43:09 +0000 UTC Warning Service monitoring-service SyncLoadBalancerFailed Error syncing load balancer: failed to ensure load balancer: failed to create target pool for load balancer (a0b939cbdca034f488dafd5d008bd793(kuttl-test-concrete-adder/monitoring-service)): googleapi: Error 403: QUOTA_EXCEEDED - Quota 'FIREWALLS' exceeded. Limit: 300.0 globally. 
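Worth noticing in this event stream: the SyncLoadBalancerFailed warning above (GCE firewall quota exhausted) is transient; the same monitoring-service reports EnsuredLoadBalancer about a minute later, so the cloud controller's retry succeeded and the test run was unaffected. When triaging a run like this, a field-selector query surfaces only the warnings; this is a hypothetical triage command, not part of the test itself:

    kubectl -n kuttl-test-concrete-adder get events \
        --field-selector type=Warning \
        --sort-by=.lastTimestamp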
logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:43:43 +0000 UTC Normal Pod monitoring-0.spec.containers{monitoring} Pulled Successfully pulled image "perconalab/pmm-server:dev-latest" in 34.192611308s (34.192625722s including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:43:43 +0000 UTC Normal Pod monitoring-0.spec.containers{monitoring} Created Created container monitoring kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:43:43 +0000 UTC Normal Pod monitoring-0.spec.containers{monitoring} Started Started container monitoring kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:19 +0000 UTC Normal Service monitoring-service EnsuredLoadBalancer Ensured load balancer logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:37 +0000 UTC Normal Service monitoring-haproxy EnsuringLoadBalancer Ensuring load balancer logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:38 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:38 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-0 ExternalProvisioning waiting for a volume to be created, either by external provisioner "pd.csi.storage.gke.io" or manually created by system administrator persistentvolume-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:38 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-concrete-adder/datadir-monitoring-mysql-0" pd.csi.storage.gke.io_gke-691abb2cbdc24ee7a4c4-482d-f440-vm_34965163-c6db-441e-84a3-be5b143dbe0e logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:38 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Claim datadir-monitoring-mysql-0 Pod monitoring-mysql-0 in StatefulSet monitoring-mysql success statefulset-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:38 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Pod monitoring-mysql-0 in StatefulSet monitoring-mysql successful statefulset-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:40 +0000 UTC Normal Pod monitoring-orc-0 Scheduled Successfully assigned kuttl-test-concrete-adder/monitoring-orc-0 to gke-jen-ps-523-f00253e-4-default-pool-66e70372-zw2p default-scheduler logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:40 +0000 UTC Normal StatefulSet.apps monitoring-orc SuccessfulCreate create Pod monitoring-orc-0 in StatefulSet monitoring-orc successful statefulset-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:41 +0000 UTC Normal Pod monitoring-orc-0.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:41 +0000 UTC Normal Pod monitoring-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 97.425719ms (97.436277ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:41 +0000 UTC Normal Pod monitoring-orc-0.spec.initContainers{orc-init} Created Created container orc-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:41 +0000 UTC Normal Pod monitoring-orc-0.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 
14:44:42 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-2f3172ab-b9bf-489a-997f-9bf9ec3cfd0e pd.csi.storage.gke.io_gke-691abb2cbdc24ee7a4c4-482d-f440-vm_34965163-c6db-441e-84a3-be5b143dbe0e logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:42 +0000 UTC Normal Pod monitoring-mysql-0 Scheduled Successfully assigned kuttl-test-concrete-adder/monitoring-mysql-0 to gke-jen-ps-523-f00253e-4-default-pool-66e70372-5zlw default-scheduler logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:42 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:42 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 75.313699ms (75.327859ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:42 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orc} Created Created container orc kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:42 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orc} Started Started container orc kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:42 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:42 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 69.128365ms (69.134565ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:42 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:42 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:50 +0000 UTC Normal Pod monitoring-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-2f3172ab-b9bf-489a-997f-9bf9ec3cfd0e" attachdetach-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:51 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:51 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 116.905777ms (116.914381ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:51 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:51 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:53 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:53 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulled Successfully 
pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 81.97081ms (81.983469ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:53 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:53 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:53 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:53 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 83.947492ms (83.958041ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:53 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:53 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:53 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:53 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 100.559874ms (100.570575ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:53 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:54 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:54 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:57 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 3.608502321s (3.608567528s including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:57 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:44:57 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:13 +0000 UTC Normal Service monitoring-haproxy EnsuredLoadBalancer Ensured load balancer logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:16 +0000 UTC Normal Pod monitoring-orc-1 Scheduled Successfully assigned kuttl-test-concrete-adder/monitoring-orc-1 to gke-jen-ps-523-f00253e-4-default-pool-66e70372-8073 default-scheduler logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:16 +0000 UTC Normal Pod monitoring-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:16 +0000 
UTC Normal Pod monitoring-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 97.324913ms (97.343639ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:16 +0000 UTC Normal Pod monitoring-orc-1.spec.initContainers{orc-init} Created Created container orc-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:16 +0000 UTC Normal StatefulSet.apps monitoring-orc SuccessfulCreate create Pod monitoring-orc-1 in StatefulSet monitoring-orc successful statefulset-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:17 +0000 UTC Normal Pod monitoring-orc-1.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:18 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:18 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 89.937956ms (89.95849ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:19 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orc} Created Created container orc kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:19 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orc} Started Started container orc kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:19 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:19 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 88.643401ms (88.66098ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:19 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:19 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:26 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:26 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-1 ExternalProvisioning waiting for a volume to be created, either by external provisioner "pd.csi.storage.gke.io" or manually created by system administrator persistentvolume-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:26 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-concrete-adder/datadir-monitoring-mysql-1" pd.csi.storage.gke.io_gke-691abb2cbdc24ee7a4c4-482d-f440-vm_34965163-c6db-441e-84a3-be5b143dbe0e logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:26 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Claim datadir-monitoring-mysql-1 Pod monitoring-mysql-1 in StatefulSet monitoring-mysql success statefulset-controller logger.go:42: 14:55:46 | monitoring | 
2024-02-05 14:45:26 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Pod monitoring-mysql-1 in StatefulSet monitoring-mysql successful statefulset-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:30 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-e89568a0-89f4-49ad-a1ec-43f49d0af956 pd.csi.storage.gke.io_gke-691abb2cbdc24ee7a4c4-482d-f440-vm_34965163-c6db-441e-84a3-be5b143dbe0e logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:30 +0000 UTC Normal Pod monitoring-mysql-1 Scheduled Successfully assigned kuttl-test-concrete-adder/monitoring-mysql-1 to gke-jen-ps-523-f00253e-4-default-pool-66e70372-zw2p default-scheduler logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:32 +0000 UTC Normal Pod monitoring-haproxy-0 Scheduled Successfully assigned kuttl-test-concrete-adder/monitoring-haproxy-0 to gke-jen-ps-523-f00253e-4-default-pool-66e70372-8073 default-scheduler logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:32 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulCreate create Pod monitoring-haproxy-0 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:33 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:33 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 145.238616ms (145.254214ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:33 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:33 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:35 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:36 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 96.339342ms (96.355871ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:36 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:36 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:36 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:36 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 99.440543ms (99.467412ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:36 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Created Created container 
mysql-monit kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:36 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:36 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:40 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 3.765958468s (3.765969818s including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:40 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:40 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:41 +0000 UTC Normal Pod monitoring-haproxy-1 Scheduled Successfully assigned kuttl-test-concrete-adder/monitoring-haproxy-1 to gke-jen-ps-523-f00253e-4-default-pool-66e70372-5zlw default-scheduler logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:41 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:41 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 99.903242ms (99.91843ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:41 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:41 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulCreate create Pod monitoring-haproxy-1 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:42 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:43 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:43 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 85.018857ms (85.035189ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:43 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:43 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:43 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:43 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image 
"perconalab/percona-server-mysql-operator:main-haproxy" in 102.934339ms (102.958944ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:43 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:43 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:43 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:44 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 109.560333ms (109.567651ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:44 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:44 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:44 +0000 UTC Normal Pod monitoring-haproxy-2 Scheduled Successfully assigned kuttl-test-concrete-adder/monitoring-haproxy-2 to gke-jen-ps-523-f00253e-4-default-pool-66e70372-zw2p default-scheduler logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:44 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulCreate create Pod monitoring-haproxy-2 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:45 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:45 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 116.421276ms (116.435633ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:45 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:45 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:46 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:46 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 100.841781ms (100.848785ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:46 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:47 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:47 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Pulling 
Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:47 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 86.2291ms (86.245872ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:47 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:47 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:47 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:50 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 3.250178431s (3.250195597s including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:50 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:50 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:51 +0000 UTC Normal Pod monitoring-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-e89568a0-89f4-49ad-a1ec-43f49d0af956" attachdetach-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:51 +0000 UTC Normal Pod monitoring-orc-2 Scheduled Successfully assigned kuttl-test-concrete-adder/monitoring-orc-2 to gke-jen-ps-523-f00253e-4-default-pool-66e70372-5zlw default-scheduler logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:51 +0000 UTC Normal StatefulSet.apps monitoring-orc SuccessfulCreate create Pod monitoring-orc-2 in StatefulSet monitoring-orc successful statefulset-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:52 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:52 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 97.064324ms (97.078869ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:52 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:52 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:52 +0000 UTC Normal Pod monitoring-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:52 +0000 UTC Normal Pod monitoring-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 107.448421ms (107.456949ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 
2024-02-05 14:45:52 +0000 UTC Normal Pod monitoring-orc-2.spec.initContainers{orc-init} Created Created container orc-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:52 +0000 UTC Normal Pod monitoring-orc-2.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:53 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:53 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 75.105374ms (75.123673ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:53 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orc} Created Created container orc kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:53 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orc} Started Started container orc kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:53 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:53 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 98.542942ms (98.550479ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:53 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:53 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:55 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:55 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 87.415276ms (87.445413ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:55 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:56 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:56 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:56 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 104.777955ms (104.785317ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:56 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:56 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 
14:55:46 | monitoring | 2024-02-05 14:45:56 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:56 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 133.25837ms (133.26533ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:56 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:56 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:56 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:56 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 100.452184ms (100.468556ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:56 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:45:56 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:46:13 +0000 UTC Warning Pod monitoring-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:46:13 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:46:16 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 84.871465ms (84.88539ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:46:47 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:46:47 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-2 ExternalProvisioning waiting for a volume to be created, either by external provisioner "pd.csi.storage.gke.io" or manually created by system administrator persistentvolume-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:46:47 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-concrete-adder/datadir-monitoring-mysql-2" pd.csi.storage.gke.io_gke-691abb2cbdc24ee7a4c4-482d-f440-vm_34965163-c6db-441e-84a3-be5b143dbe0e logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:46:47 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Claim datadir-monitoring-mysql-2 Pod monitoring-mysql-2 in StatefulSet monitoring-mysql success statefulset-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:46:47 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Pod monitoring-mysql-2 in StatefulSet monitoring-mysql 
successful statefulset-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:46:51 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-d03f8f43-d4b6-4d9b-b936-79ba18203d9d pd.csi.storage.gke.io_gke-691abb2cbdc24ee7a4c4-482d-f440-vm_34965163-c6db-441e-84a3-be5b143dbe0e logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:46:51 +0000 UTC Normal Pod monitoring-mysql-2 Scheduled Successfully assigned kuttl-test-concrete-adder/monitoring-mysql-2 to gke-jen-ps-523-f00253e-4-default-pool-66e70372-8073 default-scheduler logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:46:58 +0000 UTC Normal Pod monitoring-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-d03f8f43-d4b6-4d9b-b936-79ba18203d9d" attachdetach-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:00 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:00 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 113.846568ms (113.854527ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:00 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:00 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:02 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:02 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 106.536833ms (106.544956ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:02 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:02 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:02 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:02 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 92.202245ms (92.214517ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:02 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:02 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:02 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 
14:47:02 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 84.131934ms (84.139082ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:02 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:02 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:02 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:02 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 82.304339ms (82.325126ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:02 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:02 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:21 +0000 UTC Warning Pod monitoring-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:21 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:47:24 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 104.288046ms (104.306034ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:19 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:19 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:19 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:19 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulDelete delete Pod monitoring-haproxy-2 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:20 +0000 UTC Normal Pod monitoring-haproxy-2 Scheduled Successfully assigned kuttl-test-concrete-adder/monitoring-haproxy-2 to gke-jen-ps-523-f00253e-4-default-pool-66e70372-zw2p default-scheduler logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:21 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:21 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 96.853508ms (96.886311ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:21 +0000 UTC Normal 
Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:21 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:23 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:23 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 94.973815ms (94.989002ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:23 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:23 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:23 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:23 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 92.296694ms (92.318512ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:23 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:23 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:23 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:23 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 75.550816ms (75.55903ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:23 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:23 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:24 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:24 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:24 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:24 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulDelete delete Pod monitoring-haproxy-1 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:25 +0000 UTC Normal Pod 
monitoring-haproxy-1 Scheduled Successfully assigned kuttl-test-concrete-adder/monitoring-haproxy-1 to gke-jen-ps-523-f00253e-4-default-pool-66e70372-5zlw default-scheduler logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:26 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:26 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 101.74488ms (101.758894ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:26 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:26 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:27 +0000 UTC Normal Pod monitoring-mysql-1 Scheduled Successfully assigned kuttl-test-concrete-adder/monitoring-mysql-1 to gke-jen-ps-523-f00253e-4-default-pool-66e70372-zw2p default-scheduler logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:28 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:28 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 81.184172ms (81.201449ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:28 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:28 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:28 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:28 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 86.320003ms (86.334643ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:28 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:28 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:28 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:28 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 86.192582ms (86.213896ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:28 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Created Created container pmm-client kubelet 
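
Note on the sequence above and immediately below: the statefulset-controller is performing a rolling restart of the monitoring-haproxy pods in reverse ordinal order (monitoring-haproxy-2, then -1, then -0); each pod's containers are stopped, the pod is deleted from the StatefulSet and rescheduled, and the replacement comes up with the pmm-client sidecar attached. To follow a rollout like this outside the harness, standard kubectl commands along the following lines should work; the namespace and object names are taken from this run, and the commands are illustrative rather than part of the test output:

    # Watch the StatefulSet rollout until every replica has been recreated and is ready.
    kubectl -n kuttl-test-concrete-adder rollout status statefulset/monitoring-haproxy

    # List the namespace events oldest-first to reconstruct the ordering shown in this log.
    kubectl -n kuttl-test-concrete-adder get events --sort-by=.lastTimestamp
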
logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:28 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:29 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:29 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:29 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:29 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulDelete delete Pod monitoring-haproxy-0 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:32 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:32 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 78.035578ms (78.05096ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:32 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:32 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 86.371301ms (86.388103ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 99.503308ms (99.511061ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image 
"perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 102.88464ms (102.899263ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 90.566078ms (90.5744ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:35 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:38 +0000 UTC Normal Pod monitoring-haproxy-0 Scheduled Successfully assigned kuttl-test-concrete-adder/monitoring-haproxy-0 to gke-jen-ps-523-f00253e-4-default-pool-66e70372-8073 default-scheduler logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:39 +0000 UTC Warning Pod monitoring-haproxy-0 FailedMount MountVolume.SetUp failed for volume "config" : failed to sync configmap cache: timed out waiting for the condition kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:41 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:41 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 128.790985ms (128.809713ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:41 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:41 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:43 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:43 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 78.728323ms (78.771519ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:43 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:43 +0000 UTC Normal Pod 
monitoring-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:43 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:43 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 74.114576ms (74.1216ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:43 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:43 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:43 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:43 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 86.532811ms (86.540628ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:43 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:44 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:52 +0000 UTC Warning Pod monitoring-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:52 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:48:56 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 84.119742ms (84.127025ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:49:35 +0000 UTC Normal Pod monitoring-mysql-2 Scheduled Successfully assigned kuttl-test-concrete-adder/monitoring-mysql-2 to gke-jen-ps-523-f00253e-4-default-pool-66e70372-8073 default-scheduler logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:49:44 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:49:44 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 99.803875ms (99.813158ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:49:44 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:49:44 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:49:46 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulling 
Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:49:46 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 85.954141ms (85.970127ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:49:46 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:49:46 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:49:46 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:49:46 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 93.383787ms (93.391564ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:49:46 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:49:47 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:49:47 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:49:47 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 96.261046ms (96.268302ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:49:47 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:49:47 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:49:47 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:49:47 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 103.513827ms (103.525643ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:49:47 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:49:47 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:50:05 +0000 UTC Warning Pod monitoring-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:50:05 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:50:08 +0000 UTC Normal Pod 
monitoring-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 97.09295ms (97.101261ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:50:51 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:50:51 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:50:51 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:50:51 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:50:56 +0000 UTC Warning Pod monitoring-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2024/02/05 14:50:56 readiness check failed: connect to db: ping DB: dial tcp 10.54.48.21:33062: connect: connection refused kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:01 +0000 UTC Warning Pod monitoring-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2024/02/05 14:51:01 readiness check failed: connect to db: ping DB: dial tcp 10.54.48.21:33062: connect: connection refused kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:11 +0000 UTC Normal Pod monitoring-mysql-0 Scheduled Successfully assigned kuttl-test-concrete-adder/monitoring-mysql-0 to gke-jen-ps-523-f00253e-4-default-pool-66e70372-5zlw default-scheduler logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:13 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:13 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 99.272012ms (99.28747ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:13 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:13 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 83.633659ms (83.649207ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 
14:55:46 | monitoring | 2024-02-05 14:51:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 82.98518ms (83.013422ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 91.366351ms (91.378817ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:15 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:15 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:15 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:dev-latest" kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:15 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:dev-latest" in 105.687318ms (105.736243ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:15 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Created Created container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:15 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:33 +0000 UTC Warning Pod monitoring-mysql-0.spec.containers{mysql} Unhealthy Startup probe failed: kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:33 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 14:55:46 | monitoring | 2024-02-05 14:51:37 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 87.252977ms (87.260584ms including waiting) kubelet logger.go:42: 14:55:46 | monitoring | Deleting namespace: kuttl-test-concrete-adder
=== CONT kuttl
harness.go:405: run tests finished
harness.go:513: cleaning up
harness.go:570: removing temp folder: ""
--- PASS: kuttl (847.29s)
--- PASS: kuttl/harness (0.00s)
--- PASS: kuttl/harness/monitoring (844.60s)
PASS
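
A closing observation on the warnings scattered through this run: every "Startup probe failed" event is followed by the kubelet restarting the mysql container and a successful re-pull a few seconds later, and the readiness-probe failures at 14:50:56 and 14:51:01 coincide with the deliberate shutdown of monitoring-mysql-0, so none of them prevent the suite from passing. When such warnings do point at a real problem, the usual starting points are the pod's event history and the log of the container instance that was restarted; for this run the equivalent commands would have been (standard kubectl, shown purely as an illustration):

    # Probe configuration plus the full event history for the affected pod.
    kubectl -n kuttl-test-concrete-adder describe pod monitoring-mysql-1

    # Log of the mysql container instance from before the startup-probe restart.
    kubectl -n kuttl-test-concrete-adder logs monitoring-mysql-1 -c mysql --previous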