=== RUN kuttl harness.go:464: starting setup harness.go:255: running tests using configured kubeconfig. harness.go:278: Successful connection to cluster at: https://34.56.160.66 harness.go:363: running tests harness.go:75: going to run test suite with timeout of 180 seconds for each step harness.go:375: testsuite: e2e-tests/tests has 34 tests === RUN kuttl/harness === RUN kuttl/harness/monitoring === PAUSE kuttl/harness/monitoring === CONT kuttl/harness/monitoring logger.go:42: 02:49:13 | monitoring | Creating namespace: kuttl-test-perfect-hippo logger.go:42: 02:49:13 | monitoring/0-deploy-operator | starting test step 0-deploy-operator logger.go:42: 02:49:13 | monitoring/0-deploy-operator | running command: [sh -c set -o errexit set -o xtrace source ../../functions init_temp_dir # do this only in the first TestStep deploy_operator deploy_non_tls_cluster_secrets deploy_tls_cluster_secrets deploy_client] logger.go:42: 02:49:13 | monitoring/0-deploy-operator | + source ../../functions logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ realpath ../../.. logger.go:42: 02:49:13 | monitoring/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-873 logger.go:42: 02:49:13 | monitoring/0-deploy-operator | ++++ pwd logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/tests/monitoring logger.go:42: 02:49:13 | monitoring/0-deploy-operator | ++ test_name=monitoring logger.go:42: 02:49:13 | monitoring/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/vars.sh logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-873 logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-873 logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/deploy logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/deploy logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/conf logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/conf logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 02:49:13 | monitoring/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ export GIT_BRANCH=PR-873 logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ GIT_BRANCH=PR-873 logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ export VERSION=PR-873-af2827af logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ VERSION=PR-873-af2827af logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-873-af2827af logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-873-af2827af 
logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ export PMM_SERVER_VERSION=1.4.0 logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ PMM_SERVER_VERSION=1.4.0 logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 02:49:13 | monitoring/0-deploy-operator | ++++ which gdate logger.go:42: 02:49:13 | monitoring/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-873/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 02:49:13 | monitoring/0-deploy-operator | ++++ which date logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ date=/usr/bin/date logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ oc get projects logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ : logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ kubectl get nodes logger.go:42: 02:49:13 | monitoring/0-deploy-operator | +++ grep '^minikube' logger.go:42: 02:49:14 | monitoring/0-deploy-operator | + init_temp_dir logger.go:42: 02:49:14 | monitoring/0-deploy-operator | + rm -rf /tmp/kuttl/ps/monitoring logger.go:42: 02:49:14 | monitoring/0-deploy-operator | + mkdir -p /tmp/kuttl/ps/monitoring logger.go:42: 02:49:14 | monitoring/0-deploy-operator | + deploy_operator logger.go:42: 02:49:14 | monitoring/0-deploy-operator | + destroy_operator logger.go:42: 02:49:14 | monitoring/0-deploy-operator | 
+ kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0 logger.go:42: 02:49:14 | monitoring/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 02:49:14 | monitoring/0-deploy-operator | Error from server (NotFound): deployments.apps "percona-server-mysql-operator" not found logger.go:42: 02:49:14 | monitoring/0-deploy-operator | + true logger.go:42: 02:49:14 | monitoring/0-deploy-operator | + [[ -n ps-operator ]] logger.go:42: 02:49:14 | monitoring/0-deploy-operator | + kubectl delete namespace ps-operator --force --grace-period=0 logger.go:42: 02:49:14 | monitoring/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 02:49:14 | monitoring/0-deploy-operator | Error from server (NotFound): namespaces "ps-operator" not found logger.go:42: 02:49:14 | monitoring/0-deploy-operator | + true logger.go:42: 02:49:14 | monitoring/0-deploy-operator | + [[ -n ps-operator ]] logger.go:42: 02:49:14 | monitoring/0-deploy-operator | + create_namespace ps-operator logger.go:42: 02:49:14 | monitoring/0-deploy-operator | + local namespace=ps-operator logger.go:42: 02:49:14 | monitoring/0-deploy-operator | + [[ -n '' ]] logger.go:42: 02:49:14 | monitoring/0-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found logger.go:42: 02:49:15 | monitoring/0-deploy-operator | + kubectl wait --for=delete namespace ps-operator logger.go:42: 02:49:15 | monitoring/0-deploy-operator | + kubectl create namespace ps-operator logger.go:42: 02:49:16 | monitoring/0-deploy-operator | namespace/ps-operator created logger.go:42: 02:49:16 | monitoring/0-deploy-operator | + kubectl -n ps-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-873/deploy/crd.yaml logger.go:42: 02:49:17 | monitoring/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied logger.go:42: 02:49:17 | monitoring/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied logger.go:42: 02:49:18 | monitoring/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied logger.go:42: 02:49:18 | monitoring/0-deploy-operator | + '[' -n ps-operator ']' logger.go:42: 02:49:18 | monitoring/0-deploy-operator | + kubectl -n ps-operator apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-873/deploy/cw-rbac.yaml logger.go:42: 02:49:19 | monitoring/0-deploy-operator | serviceaccount/percona-server-mysql-operator created logger.go:42: 02:49:19 | monitoring/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created logger.go:42: 02:49:19 | monitoring/0-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged logger.go:42: 02:49:20 | monitoring/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created logger.go:42: 02:49:20 | monitoring/0-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged logger.go:42: 02:49:20 | monitoring/0-deploy-operator | + yq eval 
'(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' logger.go:42: 02:49:20 | monitoring/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"' logger.go:42: 02:49:20 | monitoring/0-deploy-operator | + kubectl -n ps-operator apply -f - logger.go:42: 02:49:20 | monitoring/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-873-af2827af logger.go:42: 02:49:20 | monitoring/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-873-af2827af"' /mnt/jenkins/workspace/cloud-ps-operator_PR-873/deploy/cw-operator.yaml logger.go:42: 02:49:21 | monitoring/0-deploy-operator | configmap/percona-server-mysql-operator-config created logger.go:42: 02:49:21 | monitoring/0-deploy-operator | deployment.apps/percona-server-mysql-operator created logger.go:42: 02:49:21 | monitoring/0-deploy-operator | + deploy_non_tls_cluster_secrets logger.go:42: 02:49:21 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-perfect-hippo apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/conf/secrets.yaml logger.go:42: 02:49:22 | monitoring/0-deploy-operator | secret/test-secrets created logger.go:42: 02:49:22 | monitoring/0-deploy-operator | + deploy_tls_cluster_secrets logger.go:42: 02:49:22 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-perfect-hippo apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/conf/ssl-secret.yaml logger.go:42: 02:49:23 | monitoring/0-deploy-operator | secret/test-ssl created logger.go:42: 02:49:23 | monitoring/0-deploy-operator | + deploy_client logger.go:42: 02:49:23 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-perfect-hippo apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/conf/client.yaml logger.go:42: 02:49:24 | monitoring/0-deploy-operator | pod/mysql-client created logger.go:42: 02:49:24 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 02:49:24 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 02:49:24 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found. logger.go:42: 02:49:26 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 02:49:26 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 02:49:26 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found. logger.go:42: 02:49:27 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 02:49:28 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. 
logger.go:42: 02:49:28 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found. logger.go:42: 02:49:29 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 02:49:29 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 02:49:30 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found. logger.go:42: 02:49:31 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 02:49:31 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 02:49:31 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found. logger.go:42: 02:49:33 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 02:49:33 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 02:49:33 | monitoring/0-deploy-operator | INFO Found 1 resource(s). logger.go:42: 02:49:33 | monitoring/0-deploy-operator | NAME NAMESPACE COL0 logger.go:42: 02:49:33 | monitoring/0-deploy-operator | percona-server-mysql-operator ps-operator 1 logger.go:42: 02:49:33 | monitoring/0-deploy-operator | ASSERT PASS logger.go:42: 02:49:33 | monitoring/0-deploy-operator | test step completed 0-deploy-operator logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | starting test step 1-deploy-pmm-server logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | running command: [sh -c set -o errexit set -o xtrace source ../../functions deploy_pmm_server sleep 30 # wait for PMM Server to start TOKEN=$(get_pmm_server_token) kubectl patch -n "${NAMESPACE}" secret test-secrets --type merge --patch "$(jq -n --arg token "$TOKEN" '{"stringData": {"pmmservertoken": $token}}')"] logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | + source ../../functions logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ realpath ../../.. 
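For reference, the operator deployment that step 0 above just completed reduces to the following sequence. This is a condensed sketch reconstructed from the xtrace, not the deploy_operator helper itself: paths are relative to the repo root, the ps-operator namespace matches the trace, and the readiness loop below uses plain kubectl where the test retries the kubectl-assert plugin.

# Condensed reconstruction of the step 0 operator deployment (sketch).
kubectl -n ps-operator apply --server-side --force-conflicts -f deploy/crd.yaml
kubectl -n ps-operator apply -f deploy/cw-rbac.yaml

# Set the operator image and tweak the manager env before applying cw-operator.yaml.
yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-873-af2827af"' deploy/cw-operator.yaml \
    | yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' \
    | yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"' \
    | kubectl -n ps-operator apply -f -

# Wait for one ready replica (the test polls `kubectl assert exist-enhanced` instead).
until [ "$(kubectl -n ps-operator get deployment percona-server-mysql-operator \
    -o jsonpath='{.status.readyReplicas}' 2>/dev/null)" = "1" ]; do
    sleep 2
done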
logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-873 logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | ++++ pwd logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/tests/monitoring logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | ++ test_name=monitoring logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/vars.sh logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-873 logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-873 logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/deploy logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/deploy logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/conf logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/conf logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ export GIT_BRANCH=PR-873 logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ GIT_BRANCH=PR-873 logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ export VERSION=PR-873-af2827af logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ VERSION=PR-873-af2827af logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-873-af2827af logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-873-af2827af logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ 
IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ export PMM_SERVER_VERSION=1.4.0 logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ PMM_SERVER_VERSION=1.4.0 logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | ++++ which gdate logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-873/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | ++++ which date logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ date=/usr/bin/date logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ oc get projects logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ : logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ kubectl get nodes logger.go:42: 02:49:33 | monitoring/1-deploy-pmm-server | +++ grep '^minikube' logger.go:42: 02:49:34 | monitoring/1-deploy-pmm-server | + deploy_pmm_server logger.go:42: 02:49:34 | monitoring/1-deploy-pmm-server | + [[ -n '' ]] logger.go:42: 02:49:34 | monitoring/1-deploy-pmm-server | + helm uninstall -n kuttl-test-perfect-hippo monitoring logger.go:42: 02:49:34 | monitoring/1-deploy-pmm-server | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-873/kubeconfig logger.go:42: 02:49:34 | monitoring/1-deploy-pmm-server | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-873/kubeconfig logger.go:42: 02:49:34 | monitoring/1-deploy-pmm-server | Error: uninstall: Release not loaded: monitoring: release: not found logger.go:42: 02:49:34 | monitoring/1-deploy-pmm-server | + : logger.go:42: 02:49:34 | monitoring/1-deploy-pmm-server | + helm repo remove percona logger.go:42: 02:49:34 | monitoring/1-deploy-pmm-server | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-873/kubeconfig logger.go:42: 02:49:34 | monitoring/1-deploy-pmm-server | WARNING: Kubernetes configuration file is world-readable. This is insecure. 
Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-873/kubeconfig logger.go:42: 02:49:34 | monitoring/1-deploy-pmm-server | Error: no repositories configured logger.go:42: 02:49:34 | monitoring/1-deploy-pmm-server | + : logger.go:42: 02:49:34 | monitoring/1-deploy-pmm-server | + kubectl delete clusterrole monitoring --ignore-not-found logger.go:42: 02:49:35 | monitoring/1-deploy-pmm-server | + kubectl delete clusterrolebinding monitoring --ignore-not-found logger.go:42: 02:49:35 | monitoring/1-deploy-pmm-server | + helm repo add percona https://percona.github.io/percona-helm-charts/ logger.go:42: 02:49:35 | monitoring/1-deploy-pmm-server | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-873/kubeconfig logger.go:42: 02:49:35 | monitoring/1-deploy-pmm-server | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-873/kubeconfig logger.go:42: 02:49:35 | monitoring/1-deploy-pmm-server | "percona" has been added to your repositories logger.go:42: 02:49:35 | monitoring/1-deploy-pmm-server | + helm install monitoring percona/pmm -n kuttl-test-perfect-hippo --set fullnameOverride=monitoring --version 1.4.0 --set imageTag=3-dev-latest --set imageRepo=perconalab/pmm-server --set service.type=LoadBalancer --force logger.go:42: 02:49:35 | monitoring/1-deploy-pmm-server | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-873/kubeconfig logger.go:42: 02:49:35 | monitoring/1-deploy-pmm-server | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-873/kubeconfig logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | NAME: monitoring logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | LAST DEPLOYED: Wed Mar 26 02:49:37 2025 logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | NAMESPACE: kuttl-test-perfect-hippo logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | STATUS: deployed logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | REVISION: 1 logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | TEST SUITE: None logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | NOTES: logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | Percona Monitoring and Management (PMM) logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | An open source database monitoring, observability and management tool logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | Check more info here: https://docs.percona.com/percona-monitoring-and-management/index.html logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | Get the application URL: logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | You can watch the status of by running 'kubectl get --namespace kuttl-test-perfect-hippo svc -w monitoring-service' logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | export SERVICE_IP=$(kubectl get svc --namespace kuttl-test-perfect-hippo monitoring-service -o jsonpath="{.status.loadBalancer.ingress[0].ip}") logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | echo https://$SERVICE_IP: logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | Get password for the "admin" user: logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | export ADMIN_PASS=$(kubectl get secret pmm-secret --namespace kuttl-test-perfect-hippo -o jsonpath='{.data.PMM_ADMIN_PASSWORD}' | base64 --decode) logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | echo $ADMIN_PASS logger.go:42: 02:49:42 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-perfect-hippo exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' logger.go:42: 02:49:43 | monitoring/1-deploy-pmm-server | Error from server (BadRequest): pod monitoring-0 does not have a host assigned logger.go:42: 02:49:43 | monitoring/1-deploy-pmm-server | + echo 'Retry ' logger.go:42: 02:49:43 | monitoring/1-deploy-pmm-server | Retry logger.go:42: 02:49:43 | monitoring/1-deploy-pmm-server | + sleep 5 logger.go:42: 02:49:48 | monitoring/1-deploy-pmm-server | + let retry+=1 logger.go:42: 02:49:48 | monitoring/1-deploy-pmm-server | + '[' 1 -ge 20 ']' logger.go:42: 02:49:48 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-perfect-hippo exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' logger.go:42: 02:49:49 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("pmm") logger.go:42: 02:49:49 | monitoring/1-deploy-pmm-server | + echo 'Retry 1' logger.go:42: 02:49:49 | monitoring/1-deploy-pmm-server | Retry 1 logger.go:42: 02:49:49 | monitoring/1-deploy-pmm-server | + sleep 5 logger.go:42: 02:49:54 | monitoring/1-deploy-pmm-server | + let retry+=1 logger.go:42: 02:49:54 | monitoring/1-deploy-pmm-server | + '[' 2 -ge 20 ']' logger.go:42: 02:49:54 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-perfect-hippo exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' logger.go:42: 02:49:55 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("pmm") logger.go:42: 02:49:55 | monitoring/1-deploy-pmm-server | + echo 'Retry 2' logger.go:42: 02:49:55 | monitoring/1-deploy-pmm-server | Retry 2 logger.go:42: 02:49:55 | monitoring/1-deploy-pmm-server | + sleep 5 logger.go:42: 02:50:00 | monitoring/1-deploy-pmm-server | + let retry+=1 logger.go:42: 02:50:00 | monitoring/1-deploy-pmm-server | + '[' 3 -ge 20 ']' logger.go:42: 02:50:00 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-perfect-hippo exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' logger.go:42: 02:50:01 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("pmm") logger.go:42: 02:50:01 | monitoring/1-deploy-pmm-server | + echo 'Retry 3' logger.go:42: 02:50:01 | monitoring/1-deploy-pmm-server | Retry 3 logger.go:42: 02:50:01 | monitoring/1-deploy-pmm-server | + sleep 5 logger.go:42: 02:50:06 | monitoring/1-deploy-pmm-server | + let retry+=1 logger.go:42: 02:50:06 | monitoring/1-deploy-pmm-server | + '[' 4 -ge 
20 ']' logger.go:42: 02:50:06 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-perfect-hippo exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' logger.go:42: 02:50:08 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("pmm") logger.go:42: 02:50:08 | monitoring/1-deploy-pmm-server | + echo 'Retry 4' logger.go:42: 02:50:08 | monitoring/1-deploy-pmm-server | Retry 4 logger.go:42: 02:50:08 | monitoring/1-deploy-pmm-server | + sleep 5 logger.go:42: 02:50:13 | monitoring/1-deploy-pmm-server | + let retry+=1 logger.go:42: 02:50:13 | monitoring/1-deploy-pmm-server | + '[' 5 -ge 20 ']' logger.go:42: 02:50:13 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-perfect-hippo exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' logger.go:42: 02:50:14 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("pmm") logger.go:42: 02:50:14 | monitoring/1-deploy-pmm-server | + echo 'Retry 5' logger.go:42: 02:50:14 | monitoring/1-deploy-pmm-server | Retry 5 logger.go:42: 02:50:14 | monitoring/1-deploy-pmm-server | + sleep 5 logger.go:42: 02:50:19 | monitoring/1-deploy-pmm-server | + let retry+=1 logger.go:42: 02:50:19 | monitoring/1-deploy-pmm-server | + '[' 6 -ge 20 ']' logger.go:42: 02:50:19 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-perfect-hippo exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' logger.go:42: 02:50:20 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("pmm") logger.go:42: 02:50:20 | monitoring/1-deploy-pmm-server | + echo 'Retry 6' logger.go:42: 02:50:20 | monitoring/1-deploy-pmm-server | Retry 6 logger.go:42: 02:50:20 | monitoring/1-deploy-pmm-server | + sleep 5 logger.go:42: 02:50:25 | monitoring/1-deploy-pmm-server | + let retry+=1 logger.go:42: 02:50:25 | monitoring/1-deploy-pmm-server | + '[' 7 -ge 20 ']' logger.go:42: 02:50:25 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-perfect-hippo exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' logger.go:42: 02:50:26 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("pmm") logger.go:42: 02:50:26 | monitoring/1-deploy-pmm-server | + echo 'Retry 7' logger.go:42: 02:50:26 | monitoring/1-deploy-pmm-server | Retry 7 logger.go:42: 02:50:26 | monitoring/1-deploy-pmm-server | + sleep 5 logger.go:42: 02:50:31 | monitoring/1-deploy-pmm-server | + let retry+=1 logger.go:42: 02:50:31 | monitoring/1-deploy-pmm-server | + '[' 8 -ge 20 ']' logger.go:42: 02:50:31 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-perfect-hippo exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' logger.go:42: 02:50:32 | monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("pmm") logger.go:42: 02:50:32 | monitoring/1-deploy-pmm-server | + echo 'Retry 8' logger.go:42: 02:50:32 | monitoring/1-deploy-pmm-server | Retry 8 logger.go:42: 02:50:32 | monitoring/1-deploy-pmm-server | + sleep 5 logger.go:42: 02:50:37 | monitoring/1-deploy-pmm-server | + let retry+=1 logger.go:42: 02:50:37 | monitoring/1-deploy-pmm-server | + '[' 9 -ge 20 ']' logger.go:42: 02:50:37 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-perfect-hippo exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' logger.go:42: 02:50:38 | 
monitoring/1-deploy-pmm-server | error: unable to upgrade connection: container not found ("pmm") logger.go:42: 02:50:38 | monitoring/1-deploy-pmm-server | + echo 'Retry 9' logger.go:42: 02:50:38 | monitoring/1-deploy-pmm-server | Retry 9 logger.go:42: 02:50:38 | monitoring/1-deploy-pmm-server | + sleep 5 logger.go:42: 02:50:43 | monitoring/1-deploy-pmm-server | + let retry+=1 logger.go:42: 02:50:43 | monitoring/1-deploy-pmm-server | + '[' 10 -ge 20 ']' logger.go:42: 02:50:43 | monitoring/1-deploy-pmm-server | + kubectl -n kuttl-test-perfect-hippo exec monitoring-0 -- bash -c 'ls -l /proc/*/exe 2>/dev/null| grep postgres >/dev/null' logger.go:42: 02:50:44 | monitoring/1-deploy-pmm-server | + sleep 30 logger.go:42: 02:51:14 | monitoring/1-deploy-pmm-server | ++ get_pmm_server_token logger.go:42: 02:51:14 | monitoring/1-deploy-pmm-server | ++ local key_name= logger.go:42: 02:51:14 | monitoring/1-deploy-pmm-server | ++ [[ -z '' ]] logger.go:42: 02:51:14 | monitoring/1-deploy-pmm-server | ++ key_name=operator logger.go:42: 02:51:14 | monitoring/1-deploy-pmm-server | ++ local ADMIN_PASSWORD logger.go:42: 02:51:14 | monitoring/1-deploy-pmm-server | +++ kubectl -n kuttl-test-perfect-hippo get secret pmm-secret -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' logger.go:42: 02:51:14 | monitoring/1-deploy-pmm-server | +++ base64 --decode logger.go:42: 02:51:15 | monitoring/1-deploy-pmm-server | ++ ADMIN_PASSWORD='o !dPlG!qqrc@y4d' logger.go:42: 02:51:15 | monitoring/1-deploy-pmm-server | ++ [[ -z o !dPlG!qqrc@y4d ]] logger.go:42: 02:51:15 | monitoring/1-deploy-pmm-server | ++ local create_response create_status_code create_json_response logger.go:42: 02:51:15 | monitoring/1-deploy-pmm-server | ++++ get_service_ip monitoring-service logger.go:42: 02:51:15 | monitoring/1-deploy-pmm-server | ++++ local service=monitoring-service logger.go:42: 02:51:15 | monitoring/1-deploy-pmm-server | ++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.spec.type}' logger.go:42: 02:51:15 | monitoring/1-deploy-pmm-server | ++++ grep -q NotFound logger.go:42: 02:51:15 | monitoring/1-deploy-pmm-server | +++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.spec.type}' logger.go:42: 02:51:16 | monitoring/1-deploy-pmm-server | ++++ '[' LoadBalancer = ClusterIP ']' logger.go:42: 02:51:16 | monitoring/1-deploy-pmm-server | ++++ egrep -q 'hostname|ip' logger.go:42: 02:51:16 | monitoring/1-deploy-pmm-server | ++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[]}' logger.go:42: 02:51:16 | monitoring/1-deploy-pmm-server | ++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[].ip}' logger.go:42: 02:51:16 | monitoring/1-deploy-pmm-server | ++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -d '{"name":"operator", "role":"Admin", "isDisabled":false}' --user 'admin:o !dPlG!qqrc@y4d' https://35.239.165.225/graph/api/serviceaccounts -w '\n%{http_code}' logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | ++ create_response='{"id":2,"name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | 201' logger.go:42: 
02:51:17 | monitoring/1-deploy-pmm-server | +++ echo '{"id":2,"name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | 201' logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | +++ tail -n1 logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | ++ create_status_code=201 logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | +++ echo '{"id":2,"name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | 201' logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | +++ sed '$ d' logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | ++ create_json_response='{"id":2,"name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | ++ [[ 201 -ne 201 ]] logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | ++ local service_account_id logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | +++ echo '{"id":2,"name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | +++ jq -r .id logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | ++ service_account_id=2 logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | ++ [[ -z 2 ]] logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | ++ [[ 2 == \n\u\l\l ]] logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | ++ local token_response token_status_code token_json_response logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | ++++ get_service_ip monitoring-service logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | ++++ local service=monitoring-service logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | ++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.spec.type}' logger.go:42: 02:51:17 | monitoring/1-deploy-pmm-server | ++++ grep -q NotFound logger.go:42: 02:51:18 | monitoring/1-deploy-pmm-server | +++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.spec.type}' logger.go:42: 02:51:18 | monitoring/1-deploy-pmm-server | ++++ '[' LoadBalancer = ClusterIP ']' logger.go:42: 02:51:18 | monitoring/1-deploy-pmm-server | ++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[]}' logger.go:42: 02:51:18 | monitoring/1-deploy-pmm-server | ++++ egrep -q 'hostname|ip' logger.go:42: 02:51:18 | monitoring/1-deploy-pmm-server | ++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[].ip}' logger.go:42: 02:51:19 | monitoring/1-deploy-pmm-server | ++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' logger.go:42: 02:51:19 | monitoring/1-deploy-pmm-server | +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -d '{"name":"operator"}' --user 'admin:o !dPlG!qqrc@y4d' https://35.239.165.225/graph/api/serviceaccounts/2/tokens -w '\n%{http_code}' logger.go:42: 02:51:20 | monitoring/1-deploy-pmm-server | ++ token_response='{"id":1,"name":"operator","key":"glsa_aHh9UyKOKSAlD6EShluhJ1GCpq3jEyjx_f8f2ede7"} logger.go:42: 02:51:20 | monitoring/1-deploy-pmm-server | 200' logger.go:42: 02:51:20 | monitoring/1-deploy-pmm-server 
| +++ echo '{"id":1,"name":"operator","key":"glsa_aHh9UyKOKSAlD6EShluhJ1GCpq3jEyjx_f8f2ede7"} logger.go:42: 02:51:20 | monitoring/1-deploy-pmm-server | 200' logger.go:42: 02:51:20 | monitoring/1-deploy-pmm-server | +++ tail -n1 logger.go:42: 02:51:20 | monitoring/1-deploy-pmm-server | ++ token_status_code=200 logger.go:42: 02:51:20 | monitoring/1-deploy-pmm-server | +++ echo '{"id":1,"name":"operator","key":"glsa_aHh9UyKOKSAlD6EShluhJ1GCpq3jEyjx_f8f2ede7"} logger.go:42: 02:51:20 | monitoring/1-deploy-pmm-server | 200' logger.go:42: 02:51:20 | monitoring/1-deploy-pmm-server | +++ sed '$ d' logger.go:42: 02:51:20 | monitoring/1-deploy-pmm-server | ++ token_json_response='{"id":1,"name":"operator","key":"glsa_aHh9UyKOKSAlD6EShluhJ1GCpq3jEyjx_f8f2ede7"}' logger.go:42: 02:51:20 | monitoring/1-deploy-pmm-server | ++ [[ 200 -ne 200 ]] logger.go:42: 02:51:20 | monitoring/1-deploy-pmm-server | ++ echo '{"id":1,"name":"operator","key":"glsa_aHh9UyKOKSAlD6EShluhJ1GCpq3jEyjx_f8f2ede7"}' logger.go:42: 02:51:20 | monitoring/1-deploy-pmm-server | ++ jq -r .key logger.go:42: 02:51:20 | monitoring/1-deploy-pmm-server | + TOKEN=glsa_aHh9UyKOKSAlD6EShluhJ1GCpq3jEyjx_f8f2ede7 logger.go:42: 02:51:20 | monitoring/1-deploy-pmm-server | ++ jq -n --arg token glsa_aHh9UyKOKSAlD6EShluhJ1GCpq3jEyjx_f8f2ede7 '{"stringData": {"pmmservertoken": $token}}' logger.go:42: 02:51:20 | monitoring/1-deploy-pmm-server | + kubectl patch -n kuttl-test-perfect-hippo secret test-secrets --type merge --patch '{ logger.go:42: 02:51:20 | monitoring/1-deploy-pmm-server | "stringData": { logger.go:42: 02:51:20 | monitoring/1-deploy-pmm-server | "pmmservertoken": "glsa_aHh9UyKOKSAlD6EShluhJ1GCpq3jEyjx_f8f2ede7" logger.go:42: 02:51:20 | monitoring/1-deploy-pmm-server | } logger.go:42: 02:51:20 | monitoring/1-deploy-pmm-server | }' logger.go:42: 02:51:20 | monitoring/1-deploy-pmm-server | secret/test-secrets patched [controller-runtime] log.SetLogger(...) was never called; logs will not be displayed. Detected at: > goroutine 12 [running]: > runtime/debug.Stack() > /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/runtime/debug/stack.go:24 +0x5e > sigs.k8s.io/controller-runtime/pkg/log.eventuallyFulfillRoot() > /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/log.go:60 +0xcd > sigs.k8s.io/controller-runtime/pkg/log.(*delegatingLogSink).WithName(0xc0002ebc00, {0x184a055, 0x14}) > /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/deleg.go:147 +0x3e > github.com/go-logr/logr.Logger.WithName({{0x1acb7d8, 0xc0002ebc00}, 0x0}, {0x184a055?, 0xc000595f80?}) > /home/mowsiany/go/pkg/mod/github.com/go-logr/logr@v1.2.4/logr.go:336 +0x36 > sigs.k8s.io/controller-runtime/pkg/client.newClient(0x131ead3?, {0x0, 0xc00045a9a0, {0x1accd90, 0xc000434b40}, 0x0, {0x0, 0x0}, 0x0}) > /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:122 +0xf1 > sigs.k8s.io/controller-runtime/pkg/client.New(0xc000183d48?, {0x0, 0xc00045a9a0, {0x1accd90, 0xc000434b40}, 0x0, {0x0, 0x0}, 0x0}) > /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:103 +0x7d > github.com/kudobuilder/kuttl/pkg/test/utils.NewRetryClient(0xc000183d48, {0x0, 0xc00045a9a0, {0x1accd90, 0xc000434b40}, 0x0, {0x0, 0x0}, 0x0}) > /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/utils/kubernetes.go:177 +0x127 > github.com/kudobuilder/kuttl/pkg/test.(*Harness).Client(0xc0003bf208, 0x24?) 
> /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:323 +0x18e > github.com/kudobuilder/kuttl/pkg/test.(*Step).Create(0xc0002bf930, 0xc0000dcb60, {0xc0005bc4c8, 0x18}) > /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:177 +0x63 > github.com/kudobuilder/kuttl/pkg/test.(*Step).Run(0xc0002bf930, 0xc0000dcb60, {0xc0005bc4c8, 0x18}) > /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:457 +0x24a > github.com/kudobuilder/kuttl/pkg/test.(*Case).Run(0xc0002794a0, 0xc0000dcb60, 0xc0005ff170) > /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/case.go:373 +0xaeb > github.com/kudobuilder/kuttl/pkg/test.(*Harness).RunTests.func1.1(0xc0000dcb60) > /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:401 +0x12e > testing.tRunner(0xc0000dcb60, 0xc0003c9ed8) > /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1689 +0xfb > created by testing.(*T).Run in goroutine 11 > /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1742 +0x390 logger.go:42: 02:51:21 | monitoring/1-deploy-pmm-server | test step completed 1-deploy-pmm-server logger.go:42: 02:51:21 | monitoring/2-create-cluster | starting test step 2-create-cluster logger.go:42: 02:51:21 | monitoring/2-create-cluster | running command: [sh -c set -o errexit set -o xtrace source ../../functions get_cr \ | yq eval '.spec.mysql.clusterType="async"' - \ | yq eval '.spec.pmm.enabled = true' - \ | yq eval '.spec.proxy.haproxy.enabled = true' - \ | yq eval '.spec.proxy.haproxy.expose.type = "LoadBalancer"' - \ | kubectl -n "${NAMESPACE}" apply -f -] logger.go:42: 02:51:21 | monitoring/2-create-cluster | + source ../../functions logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ realpath ../../.. 
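The get_pmm_server_token call traced in step 1 above drives PMM's embedded Grafana service-account API: it creates an Admin service account, mints a token for it, and the step then stores that token in the test-secrets Secret. Condensed from the trace, with the same endpoints and payloads; NAMESPACE stands for the kuttl test namespace.

# 1. Admin password and LoadBalancer IP of the PMM server installed by the helm chart.
ADMIN_PASSWORD=$(kubectl -n "$NAMESPACE" get secret pmm-secret -o jsonpath='{.data.PMM_ADMIN_PASSWORD}' | base64 --decode)
PMM_IP=$(kubectl -n "$NAMESPACE" get service/monitoring-service -o jsonpath='{.status.loadBalancer.ingress[].ip}')

# 2. Create a Grafana service account with the Admin role (the trace expects HTTP 201).
SA_ID=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
    -d '{"name":"operator", "role":"Admin", "isDisabled":false}' \
    --user "admin:${ADMIN_PASSWORD}" \
    "https://${PMM_IP}/graph/api/serviceaccounts" | jq -r .id)

# 3. Mint a token for that account; .key is the value the operator will use.
TOKEN=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
    -d '{"name":"operator"}' \
    --user "admin:${ADMIN_PASSWORD}" \
    "https://${PMM_IP}/graph/api/serviceaccounts/${SA_ID}/tokens" | jq -r .key)

# 4. Hand the token to the cluster secret read by the operator.
kubectl patch -n "$NAMESPACE" secret test-secrets --type merge \
    --patch "$(jq -n --arg token "$TOKEN" '{"stringData": {"pmmservertoken": $token}}')"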
logger.go:42: 02:51:21 | monitoring/2-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-873 logger.go:42: 02:51:21 | monitoring/2-create-cluster | ++++ pwd logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/tests/monitoring logger.go:42: 02:51:21 | monitoring/2-create-cluster | ++ test_name=monitoring logger.go:42: 02:51:21 | monitoring/2-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/vars.sh logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-873 logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-873 logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/deploy logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/deploy logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/conf logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/conf logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 02:51:21 | monitoring/2-create-cluster | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ export GIT_BRANCH=PR-873 logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ GIT_BRANCH=PR-873 logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ export VERSION=PR-873-af2827af logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ VERSION=PR-873-af2827af logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-873-af2827af logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-873-af2827af logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:51:21 | monitoring/2-create-cluster | 
+++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ export PMM_SERVER_VERSION=1.4.0 logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ PMM_SERVER_VERSION=1.4.0 logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 02:51:21 | monitoring/2-create-cluster | ++++ which gdate logger.go:42: 02:51:21 | monitoring/2-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-873/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 02:51:21 | monitoring/2-create-cluster | ++++ which date logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ date=/usr/bin/date logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ oc get projects logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ : logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ kubectl get nodes logger.go:42: 02:51:21 | monitoring/2-create-cluster | +++ grep '^minikube' logger.go:42: 02:51:21 | monitoring/2-create-cluster | + get_cr logger.go:42: 02:51:21 | monitoring/2-create-cluster | + local name_suffix= logger.go:42: 02:51:21 | monitoring/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 02:51:21 | monitoring/2-create-cluster | + yq eval '.spec.proxy.haproxy.enabled = true' - logger.go:42: 02:51:21 | monitoring/2-create-cluster | + kubectl -n kuttl-test-perfect-hippo apply -f - logger.go:42: 02:51:21 | monitoring/2-create-cluster | + yq eval '.spec.proxy.haproxy.expose.type = "LoadBalancer"' - logger.go:42: 02:51:21 | monitoring/2-create-cluster | + yq eval '.spec.pmm.enabled = true' - logger.go:42: 02:51:21 | monitoring/2-create-cluster | ++ printf '.metadata.name="%s"' monitoring logger.go:42: 02:51:21 | monitoring/2-create-cluster | + yq eval '.metadata.name="monitoring"' /mnt/jenkins/workspace/cloud-ps-operator_PR-873/deploy/cr.yaml logger.go:42: 02:51:21 | monitoring/2-create-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-873-af2827af logger.go:42: 02:51:21 | monitoring/2-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-873-af2827af"' - logger.go:42: 02:51:21 | monitoring/2-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:51:21 | monitoring/2-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:51:21 | monitoring/2-create-cluster | ++ printf '.spec.toolkit.image="%s"' 
perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:51:21 | monitoring/2-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' - logger.go:42: 02:51:21 | monitoring/2-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:51:21 | monitoring/2-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' - logger.go:42: 02:51:21 | monitoring/2-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:51:21 | monitoring/2-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' - logger.go:42: 02:51:21 | monitoring/2-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' - logger.go:42: 02:51:21 | monitoring/2-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' - logger.go:42: 02:51:21 | monitoring/2-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' - logger.go:42: 02:51:21 | monitoring/2-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' - logger.go:42: 02:51:21 | monitoring/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 02:51:21 | monitoring/2-create-cluster | + yq eval .spec.orchestrator.enabled=true - logger.go:42: 02:51:21 | monitoring/2-create-cluster | + yq eval '.spec.secretsName="test-secrets"' - logger.go:42: 02:51:21 | monitoring/2-create-cluster | + '[' -n '' ']' logger.go:42: 02:51:21 | monitoring/2-create-cluster | + yq eval - logger.go:42: 02:51:21 | monitoring/2-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:3-dev-latest logger.go:42: 02:51:21 | monitoring/2-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:3-dev-latest"' - logger.go:42: 02:51:21 | monitoring/2-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:51:21 | monitoring/2-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' - logger.go:42: 02:51:22 | monitoring/2-create-cluster | perconaservermysql.ps.percona.com/monitoring created logger.go:42: 02:54:41 | monitoring/2-create-cluster | test step completed 2-create-cluster logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | starting test step 3-rotate-pmm-token logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | running command: [sh -c set -o errexit set -o xtrace source ../../functions # add new PMM API token to secret NEW_TOKEN=$(get_pmm_server_token "operator-new") kubectl patch -n "${NAMESPACE}" secret test-secrets --type merge --patch "$(jq -n --arg token "$NEW_TOKEN" '{"stringData": {"pmmservertoken": $token}}')" # delete old PMM token delete_pmm_server_token "operator" sleep 10] logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | + source ../../functions logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ realpath ../../.. 
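The cluster applied in step 2 above is deploy/cr.yaml with a stack of yq overrides: get_cr (from e2e-tests/functions) injects the test images and secret names, and the step adds async replication, PMM, and an externally exposed HAProxy. A trimmed sketch of that pipeline, with the image overrides collapsed to the PMM client one for brevity:

# Sketch of the CR customization traced in step 2; not the full get_cr helper.
yq eval '.metadata.name="monitoring"' deploy/cr.yaml \
    | yq eval '.spec.secretsName="test-secrets"' \
    | yq eval '.spec.sslSecretName="test-ssl"' \
    | yq eval '.spec.pmm.image="perconalab/pmm-client:3-dev-latest"' \
    | yq eval '.spec.mysql.clusterType="async"' \
    | yq eval '.spec.pmm.enabled = true' \
    | yq eval '.spec.proxy.haproxy.enabled = true' \
    | yq eval '.spec.proxy.haproxy.expose.type = "LoadBalancer"' \
    | kubectl -n "$NAMESPACE" apply -f -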
logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-873 logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | ++++ pwd logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/tests/monitoring logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | ++ test_name=monitoring logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/vars.sh logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-873 logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-873 logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/deploy logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/deploy logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/conf logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/conf logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ export GIT_BRANCH=PR-873 logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ GIT_BRANCH=PR-873 logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ export VERSION=PR-873-af2827af logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ VERSION=PR-873-af2827af logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-873-af2827af logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-873-af2827af logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ 
IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ export PMM_SERVER_VERSION=1.4.0 logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ PMM_SERVER_VERSION=1.4.0 logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | ++++ which gdate logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-873/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | ++++ which date logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ date=/usr/bin/date logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ oc get projects logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ : logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ kubectl get nodes logger.go:42: 02:54:41 | monitoring/3-rotate-pmm-token | +++ grep '^minikube' logger.go:42: 02:54:42 | monitoring/3-rotate-pmm-token | ++ get_pmm_server_token operator-new logger.go:42: 02:54:42 | monitoring/3-rotate-pmm-token | ++ local key_name=operator-new logger.go:42: 02:54:42 | monitoring/3-rotate-pmm-token | ++ [[ -z operator-new ]] logger.go:42: 02:54:42 | monitoring/3-rotate-pmm-token | ++ local ADMIN_PASSWORD logger.go:42: 02:54:42 | monitoring/3-rotate-pmm-token | +++ kubectl -n kuttl-test-perfect-hippo get secret pmm-secret -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' logger.go:42: 02:54:42 | monitoring/3-rotate-pmm-token | +++ base64 --decode logger.go:42: 02:54:42 | monitoring/3-rotate-pmm-token | ++ ADMIN_PASSWORD='o !dPlG!qqrc@y4d' logger.go:42: 02:54:42 | monitoring/3-rotate-pmm-token | ++ [[ -z o !dPlG!qqrc@y4d ]] logger.go:42: 02:54:42 | monitoring/3-rotate-pmm-token | ++ local create_response create_status_code create_json_response logger.go:42: 02:54:42 | monitoring/3-rotate-pmm-token | ++++ get_service_ip monitoring-service logger.go:42: 02:54:42 | monitoring/3-rotate-pmm-token | ++++ local service=monitoring-service logger.go:42: 02:54:42 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.spec.type}' logger.go:42: 02:54:42 | monitoring/3-rotate-pmm-token | ++++ grep -q NotFound logger.go:42: 02:54:42 | monitoring/3-rotate-pmm-token | +++++ kubectl get service/monitoring-service -n 
kuttl-test-perfect-hippo -o 'jsonpath={.spec.type}' logger.go:42: 02:54:43 | monitoring/3-rotate-pmm-token | ++++ '[' LoadBalancer = ClusterIP ']' logger.go:42: 02:54:43 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[]}' logger.go:42: 02:54:43 | monitoring/3-rotate-pmm-token | ++++ egrep -q 'hostname|ip' logger.go:42: 02:54:43 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[].ip}' logger.go:42: 02:54:43 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -d '{"name":"operator-new", "role":"Admin", "isDisabled":false}' --user 'admin:o !dPlG!qqrc@y4d' https://35.239.165.225/graph/api/serviceaccounts -w '\n%{http_code}' logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | ++ create_response='{"id":3,"name":"operator-new","login":"sa-1-operator-new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | 201' logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | +++ echo '{"id":3,"name":"operator-new","login":"sa-1-operator-new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | 201' logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | +++ tail -n1 logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | ++ create_status_code=201 logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | +++ echo '{"id":3,"name":"operator-new","login":"sa-1-operator-new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | 201' logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | +++ sed '$ d' logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | ++ create_json_response='{"id":3,"name":"operator-new","login":"sa-1-operator-new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | ++ [[ 201 -ne 201 ]] logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | ++ local service_account_id logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | +++ echo '{"id":3,"name":"operator-new","login":"sa-1-operator-new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | +++ jq -r .id logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | ++ service_account_id=3 logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | ++ [[ -z 3 ]] logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | ++ [[ 3 == \n\u\l\l ]] logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | ++ local token_response token_status_code token_json_response logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | ++++ get_service_ip monitoring-service logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | ++++ local service=monitoring-service logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.spec.type}' logger.go:42: 02:54:44 | monitoring/3-rotate-pmm-token | ++++ grep -q NotFound logger.go:42: 02:54:45 | 
monitoring/3-rotate-pmm-token | +++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.spec.type}' logger.go:42: 02:54:45 | monitoring/3-rotate-pmm-token | ++++ '[' LoadBalancer = ClusterIP ']' logger.go:42: 02:54:45 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[]}' logger.go:42: 02:54:45 | monitoring/3-rotate-pmm-token | ++++ egrep -q 'hostname|ip' logger.go:42: 02:54:45 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[].ip}' logger.go:42: 02:54:46 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' logger.go:42: 02:54:46 | monitoring/3-rotate-pmm-token | +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -d '{"name":"operator-new"}' --user 'admin:o !dPlG!qqrc@y4d' https://35.239.165.225/graph/api/serviceaccounts/3/tokens -w '\n%{http_code}' logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | ++ token_response='{"id":2,"name":"operator-new","key":"glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d"} logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | +++ echo '{"id":2,"name":"operator-new","key":"glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d"} logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | +++ tail -n1 logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | ++ token_status_code=200 logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | +++ echo '{"id":2,"name":"operator-new","key":"glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d"} logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | +++ sed '$ d' logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | ++ token_json_response='{"id":2,"name":"operator-new","key":"glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d"}' logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | ++ [[ 200 -ne 200 ]] logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | ++ echo '{"id":2,"name":"operator-new","key":"glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d"}' logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | ++ jq -r .key logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | + NEW_TOKEN=glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | ++ jq -n --arg token glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d '{"stringData": {"pmmservertoken": $token}}' logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | + kubectl patch -n kuttl-test-perfect-hippo secret test-secrets --type merge --patch '{ logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | "stringData": { logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | "pmmservertoken": "glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d" logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | } logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | }' logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | secret/test-secrets patched logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | + delete_pmm_server_token operator logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | + local key_name=operator logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | + [[ -z operator ]] 
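Editor's note: the token rotation traced above condenses to the following create-and-store flow. This is a sketch assembled from the commands visible in the log (the Grafana service-account API on the PMM server, then a patch of the users secret); variable names are illustrative and the HTTP status-code checks done by the real helper are omitted.

# Look up the PMM server address and admin password, create a new "operator-new"
# service account, mint a token for it, and store the token in the users secret.
PMM_IP=$(kubectl -n "${NAMESPACE}" get service/monitoring-service \
  -o 'jsonpath={.status.loadBalancer.ingress[].ip}')
ADMIN_PASSWORD=$(kubectl -n "${NAMESPACE}" get secret pmm-secret \
  -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' | base64 --decode)

sa_id=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
  -d '{"name":"operator-new","role":"Admin","isDisabled":false}' \
  --user "admin:${ADMIN_PASSWORD}" \
  "https://${PMM_IP}/graph/api/serviceaccounts" | jq -r .id)

NEW_TOKEN=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
  -d '{"name":"operator-new"}' \
  --user "admin:${ADMIN_PASSWORD}" \
  "https://${PMM_IP}/graph/api/serviceaccounts/${sa_id}/tokens" | jq -r .key)

kubectl -n "${NAMESPACE}" patch secret test-secrets --type merge \
  --patch "$(jq -n --arg token "$NEW_TOKEN" '{"stringData": {"pmmservertoken": $token}}')"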
logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | + local ADMIN_PASSWORD logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | ++ kubectl -n kuttl-test-perfect-hippo get secret pmm-secret -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' logger.go:42: 02:54:47 | monitoring/3-rotate-pmm-token | ++ base64 --decode logger.go:42: 02:54:48 | monitoring/3-rotate-pmm-token | + ADMIN_PASSWORD='o !dPlG!qqrc@y4d' logger.go:42: 02:54:48 | monitoring/3-rotate-pmm-token | + [[ -z o !dPlG!qqrc@y4d ]] logger.go:42: 02:54:48 | monitoring/3-rotate-pmm-token | + local 'user_credentials=admin:o !dPlG!qqrc@y4d' logger.go:42: 02:54:48 | monitoring/3-rotate-pmm-token | + local service_accounts_response service_accounts_status logger.go:42: 02:54:48 | monitoring/3-rotate-pmm-token | +++ get_service_ip monitoring-service logger.go:42: 02:54:48 | monitoring/3-rotate-pmm-token | +++ local service=monitoring-service logger.go:42: 02:54:48 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.spec.type}' logger.go:42: 02:54:48 | monitoring/3-rotate-pmm-token | +++ grep -q NotFound logger.go:42: 02:54:48 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.spec.type}' logger.go:42: 02:54:48 | monitoring/3-rotate-pmm-token | +++ '[' LoadBalancer = ClusterIP ']' logger.go:42: 02:54:48 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[]}' logger.go:42: 02:54:48 | monitoring/3-rotate-pmm-token | +++ egrep -q 'hostname|ip' logger.go:42: 02:54:49 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[].ip}' logger.go:42: 02:54:49 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' logger.go:42: 02:54:49 | monitoring/3-rotate-pmm-token | ++ curl --insecure -s -X GET --user 'admin:o !dPlG!qqrc@y4d' https://35.239.165.225/graph/api/serviceaccounts/search -w '\n%{http_code}' logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | + service_accounts_response='{"totalCount":2,"serviceAccounts":[{"id":2,"name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"name":"operator-new","login":"sa-1-operator-new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000} logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | ++ echo '{"totalCount":2,"serviceAccounts":[{"id":2,"name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"name":"operator-new","login":"sa-1-operator-new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000} logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | ++ tail -n1 logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | + service_accounts_status=200 logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | ++ echo 
'{"totalCount":2,"serviceAccounts":[{"id":2,"name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"name":"operator-new","login":"sa-1-operator-new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000} logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | ++ sed '$ d' logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | + service_accounts_json='{"totalCount":2,"serviceAccounts":[{"id":2,"name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"name":"operator-new","login":"sa-1-operator-new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000}' logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | + [[ 200 -ne 200 ]] logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | + local service_account_id logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | ++ echo '{"totalCount":2,"serviceAccounts":[{"id":2,"name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"name":"operator-new","login":"sa-1-operator-new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000}' logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | ++ jq -r '.serviceAccounts[] | select(.name == "operator").id' logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | + service_account_id=2 logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | + [[ -z 2 ]] logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | + [[ 2 == \n\u\l\l ]] logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | + local tokens_response tokens_status tokens_json logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | +++ get_service_ip monitoring-service logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | +++ local service=monitoring-service logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.spec.type}' logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | +++ grep -q NotFound logger.go:42: 02:54:50 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.spec.type}' logger.go:42: 02:54:51 | monitoring/3-rotate-pmm-token | +++ '[' LoadBalancer = ClusterIP ']' logger.go:42: 02:54:51 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[]}' logger.go:42: 02:54:51 | monitoring/3-rotate-pmm-token | +++ egrep -q 'hostname|ip' logger.go:42: 02:54:51 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[].ip}' logger.go:42: 02:54:51 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | ++ curl --insecure -s -X GET --user 'admin:o !dPlG!qqrc@y4d' 
https://35.239.165.225/graph/api/serviceaccounts/2/tokens -w '\n%{http_code}' logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | + tokens_response='[{"id":1,"name":"operator","created":"2025-03-26T02:51:20Z","lastUsedAt":"2025-03-26T02:54:47Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}] logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | ++ echo '[{"id":1,"name":"operator","created":"2025-03-26T02:51:20Z","lastUsedAt":"2025-03-26T02:54:47Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}] logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | ++ tail -n1 logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | + tokens_status=200 logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | ++ echo '[{"id":1,"name":"operator","created":"2025-03-26T02:51:20Z","lastUsedAt":"2025-03-26T02:54:47Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}] logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | ++ sed '$ d' logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | + tokens_json='[{"id":1,"name":"operator","created":"2025-03-26T02:51:20Z","lastUsedAt":"2025-03-26T02:54:47Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}]' logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | + [[ 200 -ne 200 ]] logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | + local token_id logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | ++ echo '[{"id":1,"name":"operator","created":"2025-03-26T02:51:20Z","lastUsedAt":"2025-03-26T02:54:47Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}]' logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | ++ jq -r '.[] | select(.name == "operator").id' logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | + token_id=1 logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | + [[ -z 1 ]] logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | + [[ 1 == \n\u\l\l ]] logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | + local delete_response delete_status logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | +++ get_service_ip monitoring-service logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | +++ local service=monitoring-service logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.spec.type}' logger.go:42: 02:54:52 | monitoring/3-rotate-pmm-token | +++ grep -q NotFound logger.go:42: 02:54:53 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.spec.type}' logger.go:42: 02:54:53 | monitoring/3-rotate-pmm-token | +++ '[' LoadBalancer = ClusterIP ']' logger.go:42: 02:54:53 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[]}' logger.go:42: 02:54:53 | monitoring/3-rotate-pmm-token | +++ egrep -q 'hostname|ip' logger.go:42: 02:54:53 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[].ip}' logger.go:42: 02:54:54 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-perfect-hippo -o 
'jsonpath={.status.loadBalancer.ingress[].hostname}' logger.go:42: 02:54:54 | monitoring/3-rotate-pmm-token | ++ curl --insecure -s -X DELETE --user 'admin:o !dPlG!qqrc@y4d' https://35.239.165.225/graph/api/serviceaccounts/2/tokens/1 -w '\n%{http_code}' logger.go:42: 02:54:54 | monitoring/3-rotate-pmm-token | + delete_response='{"message":"Service account token deleted"} logger.go:42: 02:54:54 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 02:54:54 | monitoring/3-rotate-pmm-token | ++ echo '{"message":"Service account token deleted"} logger.go:42: 02:54:54 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 02:54:54 | monitoring/3-rotate-pmm-token | ++ tail -n1 logger.go:42: 02:54:54 | monitoring/3-rotate-pmm-token | + delete_status=200 logger.go:42: 02:54:54 | monitoring/3-rotate-pmm-token | + [[ 200 -ne 200 ]] logger.go:42: 02:54:54 | monitoring/3-rotate-pmm-token | + sleep 10 logger.go:42: 02:58:25 | monitoring/3-rotate-pmm-token | test step completed 3-rotate-pmm-token logger.go:42: 02:58:25 | monitoring/4-check-metrics | starting test step 4-check-metrics logger.go:42: 02:58:25 | monitoring/4-check-metrics | running command: [sh -c set -o errexit set -o xtrace source ../../functions sleep 70 # we should wait more than one minute because `get_metric_values` gets data for the last 60 seconds TOKEN=$(kubectl get secret internal-monitoring -o jsonpath='{.data.pmmservertoken}' -n "${NAMESPACE}" | base64 --decode) for i in $(seq 0 2); do get_metric_values node_boot_time_seconds ${NAMESPACE}-$(get_cluster_name)-mysql-${i} $TOKEN get_metric_values mysql_global_status_uptime ${NAMESPACE}-$(get_cluster_name)-mysql-${i} $TOKEN done sleep 90 # wait for QAN get_qan20_values monitoring-mysql-0 $TOKEN haproxy_svc=$(get_service_ip "monitoring-haproxy") http_code=$(curl -s -o /dev/null -w "%{http_code}" http://${haproxy_svc}:8404/metrics) if [[ $http_code != 200 ]]; then echo "Error: http code is $http_code" exit 1 fi] logger.go:42: 02:58:25 | monitoring/4-check-metrics | + source ../../functions logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ realpath ../../.. 
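Editor's note: the delete_pmm_server_token trace above reduces to three calls against the same service-account API: find the old "operator" account, find its token, revoke it. A sketch, reusing the PMM_IP and ADMIN_PASSWORD lookups from the previous note and again dropping the status-code checks.

# Revoke the old "operator" token on the PMM server.
sa_id=$(curl --insecure -s -X GET --user "admin:${ADMIN_PASSWORD}" \
  "https://${PMM_IP}/graph/api/serviceaccounts/search" \
  | jq -r '.serviceAccounts[] | select(.name == "operator").id')

token_id=$(curl --insecure -s -X GET --user "admin:${ADMIN_PASSWORD}" \
  "https://${PMM_IP}/graph/api/serviceaccounts/${sa_id}/tokens" \
  | jq -r '.[] | select(.name == "operator").id')

curl --insecure -s -X DELETE --user "admin:${ADMIN_PASSWORD}" \
  "https://${PMM_IP}/graph/api/serviceaccounts/${sa_id}/tokens/${token_id}"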
logger.go:42: 02:58:25 | monitoring/4-check-metrics | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-873 logger.go:42: 02:58:25 | monitoring/4-check-metrics | ++++ pwd logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/tests/monitoring logger.go:42: 02:58:25 | monitoring/4-check-metrics | ++ test_name=monitoring logger.go:42: 02:58:25 | monitoring/4-check-metrics | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/vars.sh logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-873 logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-873 logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/deploy logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/deploy logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/conf logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/conf logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 02:58:25 | monitoring/4-check-metrics | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ export GIT_BRANCH=PR-873 logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ GIT_BRANCH=PR-873 logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ export VERSION=PR-873-af2827af logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ VERSION=PR-873-af2827af logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-873-af2827af logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-873-af2827af logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ export 
IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ export PMM_SERVER_VERSION=1.4.0 logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ PMM_SERVER_VERSION=1.4.0 logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 02:58:25 | monitoring/4-check-metrics | ++++ which gdate logger.go:42: 02:58:25 | monitoring/4-check-metrics | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-873/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 02:58:25 | monitoring/4-check-metrics | ++++ which date logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ date=/usr/bin/date logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ oc get projects logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ : logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ kubectl get nodes logger.go:42: 02:58:25 | monitoring/4-check-metrics | +++ grep '^minikube' logger.go:42: 02:58:25 | monitoring/4-check-metrics | + sleep 70 logger.go:42: 02:59:35 | monitoring/4-check-metrics | ++ kubectl get secret internal-monitoring -o 'jsonpath={.data.pmmservertoken}' -n kuttl-test-perfect-hippo logger.go:42: 02:59:35 | monitoring/4-check-metrics | ++ base64 --decode logger.go:42: 02:59:36 | monitoring/4-check-metrics | + TOKEN=glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d logger.go:42: 02:59:36 | monitoring/4-check-metrics | ++ seq 0 2 logger.go:42: 02:59:36 | monitoring/4-check-metrics | + for i in '$(seq 0 2)' logger.go:42: 02:59:36 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 02:59:36 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-perfect-hippo get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:59:36 | monitoring/4-check-metrics | + get_metric_values node_boot_time_seconds kuttl-test-perfect-hippo-monitoring-mysql-0 glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d logger.go:42: 02:59:36 | monitoring/4-check-metrics | + local metric=node_boot_time_seconds logger.go:42: 02:59:36 | monitoring/4-check-metrics | + local instance=kuttl-test-perfect-hippo-monitoring-mysql-0 logger.go:42: 02:59:36 | monitoring/4-check-metrics | + local token=glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d logger.go:42: 02:59:36 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute' logger.go:42: 02:59:36 | monitoring/4-check-metrics | + local start=1742957916 logger.go:42: 02:59:36 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s logger.go:42: 02:59:36 | monitoring/4-check-metrics | + local end=1742957976 logger.go:42: 
02:59:36 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 02:59:38 | monitoring/4-check-metrics | "1742956309" logger.go:42: 02:59:38 | monitoring/4-check-metrics | "1742956309" logger.go:42: 02:59:38 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 02:59:38 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-perfect-hippo get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:59:38 | monitoring/4-check-metrics | + get_metric_values mysql_global_status_uptime kuttl-test-perfect-hippo-monitoring-mysql-0 glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d logger.go:42: 02:59:38 | monitoring/4-check-metrics | + local metric=mysql_global_status_uptime logger.go:42: 02:59:38 | monitoring/4-check-metrics | + local instance=kuttl-test-perfect-hippo-monitoring-mysql-0 logger.go:42: 02:59:38 | monitoring/4-check-metrics | + local token=glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d logger.go:42: 02:59:38 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute' logger.go:42: 02:59:38 | monitoring/4-check-metrics | + local start=1742957918 logger.go:42: 02:59:38 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s logger.go:42: 02:59:38 | monitoring/4-check-metrics | + local end=1742957978 logger.go:42: 02:59:38 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 02:59:40 | monitoring/4-check-metrics | "165" logger.go:42: 02:59:40 | monitoring/4-check-metrics | "109" logger.go:42: 02:59:40 | monitoring/4-check-metrics | + for i in '$(seq 0 2)' logger.go:42: 02:59:40 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 02:59:40 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-perfect-hippo get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:59:40 | monitoring/4-check-metrics | + get_metric_values node_boot_time_seconds kuttl-test-perfect-hippo-monitoring-mysql-1 glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d logger.go:42: 02:59:40 | monitoring/4-check-metrics | + local metric=node_boot_time_seconds logger.go:42: 02:59:40 | monitoring/4-check-metrics | + local instance=kuttl-test-perfect-hippo-monitoring-mysql-1 logger.go:42: 02:59:40 | monitoring/4-check-metrics | + local token=glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d logger.go:42: 02:59:40 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute' logger.go:42: 02:59:40 | monitoring/4-check-metrics | + local start=1742957920 logger.go:42: 02:59:40 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s logger.go:42: 02:59:40 | monitoring/4-check-metrics | + local end=1742957980 logger.go:42: 02:59:40 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 02:59:42 | monitoring/4-check-metrics | "1742954221" logger.go:42: 02:59:42 | monitoring/4-check-metrics | "1742954221" logger.go:42: 02:59:42 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 02:59:42 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-perfect-hippo get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:59:42 | monitoring/4-check-metrics | + get_metric_values mysql_global_status_uptime kuttl-test-perfect-hippo-monitoring-mysql-1 glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d logger.go:42: 02:59:42 | monitoring/4-check-metrics | + local metric=mysql_global_status_uptime logger.go:42: 02:59:42 | monitoring/4-check-metrics | + local instance=kuttl-test-perfect-hippo-monitoring-mysql-1 logger.go:42: 02:59:42 | monitoring/4-check-metrics | + local token=glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d logger.go:42: 02:59:42 | 
monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute' logger.go:42: 02:59:42 | monitoring/4-check-metrics | + local start=1742957922 logger.go:42: 02:59:42 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s logger.go:42: 02:59:42 | monitoring/4-check-metrics | + local end=1742957982 logger.go:42: 02:59:42 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 02:59:44 | monitoring/4-check-metrics | "107" logger.go:42: 02:59:44 | monitoring/4-check-metrics | "117" logger.go:42: 02:59:44 | monitoring/4-check-metrics | + for i in '$(seq 0 2)' logger.go:42: 02:59:44 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 02:59:44 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-perfect-hippo get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:59:44 | monitoring/4-check-metrics | + get_metric_values node_boot_time_seconds kuttl-test-perfect-hippo-monitoring-mysql-2 glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d logger.go:42: 02:59:44 | monitoring/4-check-metrics | + local metric=node_boot_time_seconds logger.go:42: 02:59:44 | monitoring/4-check-metrics | + local instance=kuttl-test-perfect-hippo-monitoring-mysql-2 logger.go:42: 02:59:44 | monitoring/4-check-metrics | + local token=glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d logger.go:42: 02:59:44 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute' logger.go:42: 02:59:44 | monitoring/4-check-metrics | + local start=1742957924 logger.go:42: 02:59:44 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s logger.go:42: 02:59:44 | monitoring/4-check-metrics | + local end=1742957984 logger.go:42: 02:59:44 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 02:59:46 | monitoring/4-check-metrics | "1742954218" logger.go:42: 02:59:46 | monitoring/4-check-metrics | "1742954218" logger.go:42: 02:59:46 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 02:59:46 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-perfect-hippo get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 02:59:46 | monitoring/4-check-metrics | + get_metric_values mysql_global_status_uptime kuttl-test-perfect-hippo-monitoring-mysql-2 glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d logger.go:42: 02:59:46 | monitoring/4-check-metrics | + local metric=mysql_global_status_uptime logger.go:42: 02:59:46 | monitoring/4-check-metrics | + local instance=kuttl-test-perfect-hippo-monitoring-mysql-2 logger.go:42: 02:59:46 | monitoring/4-check-metrics | + local token=glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d logger.go:42: 02:59:46 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s -d '-1 minute' logger.go:42: 02:59:46 | monitoring/4-check-metrics | + local start=1742957926 logger.go:42: 02:59:46 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%s logger.go:42: 02:59:46 | monitoring/4-check-metrics | + local end=1742957986 logger.go:42: 02:59:46 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 02:59:48 | monitoring/4-check-metrics | "40" logger.go:42: 02:59:48 | monitoring/4-check-metrics | "50" logger.go:42: 02:59:48 | monitoring/4-check-metrics | + sleep 90 logger.go:42: 03:01:18 | monitoring/4-check-metrics | + get_qan20_values monitoring-mysql-0 glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d logger.go:42: 03:01:18 | monitoring/4-check-metrics | + local instance=monitoring-mysql-0 logger.go:42: 03:01:18 | monitoring/4-check-metrics | + local token=glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d logger.go:42: 03:01:18 | monitoring/4-check-metrics | ++ /usr/bin/date -u 
+%Y-%m-%dT%H:%M:%S -d '-30 minute' logger.go:42: 03:01:18 | monitoring/4-check-metrics | + local start=2025-03-26T02:31:18 logger.go:42: 03:01:18 | monitoring/4-check-metrics | ++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S logger.go:42: 03:01:18 | monitoring/4-check-metrics | + local end=2025-03-26T03:01:18 logger.go:42: 03:01:18 | monitoring/4-check-metrics | + local endpoint=monitoring-service logger.go:42: 03:01:18 | monitoring/4-check-metrics | ++ cat logger.go:42: 03:01:18 | monitoring/4-check-metrics | +++ /usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z logger.go:42: 03:01:18 | monitoring/4-check-metrics | +++ /usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z logger.go:42: 03:01:18 | monitoring/4-check-metrics | + local 'payload={ logger.go:42: 03:01:18 | monitoring/4-check-metrics | "columns":[ logger.go:42: 03:01:18 | monitoring/4-check-metrics | "load", logger.go:42: 03:01:18 | monitoring/4-check-metrics | "num_queries", logger.go:42: 03:01:18 | monitoring/4-check-metrics | "query_time" logger.go:42: 03:01:18 | monitoring/4-check-metrics | ], logger.go:42: 03:01:18 | monitoring/4-check-metrics | "first_seen": false, logger.go:42: 03:01:18 | monitoring/4-check-metrics | "group_by": "queryid", logger.go:42: 03:01:18 | monitoring/4-check-metrics | "include_only_fields": [], logger.go:42: 03:01:18 | monitoring/4-check-metrics | "keyword": "", logger.go:42: 03:01:18 | monitoring/4-check-metrics | "labels": [ logger.go:42: 03:01:18 | monitoring/4-check-metrics | { logger.go:42: 03:01:18 | monitoring/4-check-metrics | "key": "cluster", logger.go:42: 03:01:18 | monitoring/4-check-metrics | "value": ["monitoring"] logger.go:42: 03:01:18 | monitoring/4-check-metrics | }], logger.go:42: 03:01:18 | monitoring/4-check-metrics | "limit": 10, logger.go:42: 03:01:18 | monitoring/4-check-metrics | "offset": 0, logger.go:42: 03:01:18 | monitoring/4-check-metrics | "order_by": "-load", logger.go:42: 03:01:18 | monitoring/4-check-metrics | "main_metric": "load", logger.go:42: 03:01:18 | monitoring/4-check-metrics | "period_start_from": "2025-03-25T15:01:18+00:00", logger.go:42: 03:01:18 | monitoring/4-check-metrics | "period_start_to": "2025-03-26T03:01:18+00:00" logger.go:42: 03:01:18 | monitoring/4-check-metrics | }' logger.go:42: 03:01:18 | monitoring/4-check-metrics | + jq '.rows[].fingerprint' logger.go:42: 03:01:18 | monitoring/4-check-metrics | ++ sed 's/\n//g' logger.go:42: 03:01:18 | monitoring/4-check-metrics | ++ echo '{' '"columns":[' '"load",' '"num_queries",' '"query_time"' '],' '"first_seen":' false, '"group_by":' '"queryid",' '"include_only_fields":' '[],' '"keyword":' '"",' '"labels":' '[' '{' '"key":' '"cluster",' '"value":' '["monitoring"]' '}],' '"limit":' 10, '"offset":' 0, '"order_by":' '"-load",' '"main_metric":' '"load",' '"period_start_from":' '"2025-03-25T15:01:18+00:00",' '"period_start_to":' '"2025-03-26T03:01:18+00:00"' '}' logger.go:42: 03:01:18 | monitoring/4-check-metrics | + run_curl -XPOST -d ''\''{ "columns":[ "load", "num_queries", "query_time" ], "first_seen": false, "group_by": "queryid", "include_only_fields": [], "keyword": "", "labels": [ { "key": "cluster", "value": ["monitoring"] }], "limit": 10, "offset": 0, "order_by": "-load", "main_metric": "load", "period_start_from": "2025-03-25T15:01:18+00:00", "period_start_to": "2025-03-26T03:01:18+00:00" }'\''' '-H '\''Authorization: Bearer glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d'\''' https://@monitoring-service/v1/qan/metrics:getReport logger.go:42: 03:01:18 | monitoring/4-check-metrics | + kubectl -n 
kuttl-test-perfect-hippo exec mysql-client -- bash -c 'curl -s -k -XPOST -d '\''{ "columns":[ "load", "num_queries", "query_time" ], "first_seen": false, "group_by": "queryid", "include_only_fields": [], "keyword": "", "labels": [ { "key": "cluster", "value": ["monitoring"] }], "limit": 10, "offset": 0, "order_by": "-load", "main_metric": "load", "period_start_from": "2025-03-25T15:01:18+00:00", "period_start_to": "2025-03-26T03:01:18+00:00" }'\'' -H '\''Authorization: Bearer glsa_VX0PJHz41gZFYynv9MrnRKxsZmv7uAoC_4704083d'\'' https://@monitoring-service/v1/qan/metrics:getReport' logger.go:42: 03:01:20 | monitoring/4-check-metrics | "TOTAL" logger.go:42: 03:01:20 | monitoring/4-check-metrics | "REPLACE INTO `sys_operator` . `heartbeat` ( `ts` , `server_id` , FILE , `position` , `relay_source_log_file` , `exec_source_log_pos` ) VALUES (...)" logger.go:42: 03:01:20 | monitoring/4-check-metrics | "SELECT `EVENT_NAME` , `COUNT_STAR` , `SUM_TIMER_WAIT` FROM `performance_schema` . `events_waits_summary_global_by_event_name`" logger.go:42: 03:01:20 | monitoring/4-check-metrics | "SHOW GLOBAL VARIABLES LIKE ?" logger.go:42: 03:01:20 | monitoring/4-check-metrics | "SHOW GLOBAL STATUS LIKE ?" logger.go:42: 03:01:20 | monitoring/4-check-metrics | "SELECT `t` . `table_schema` , `t` . `table_name` , COLUMN_NAME , AUTO_INCREMENT , `pow` ( ? , CASE `data_type` WHEN ? THEN ? WHEN ? THEN ? WHEN ? THEN ? WHEN ? THEN ? WHEN ? THEN ? END + ( `column_type` LIKE ? ) ) - ? AS `max_int` FROM `information_schema` . `columns` `c` STRAIGHT_JOIN `information_schema` . `tables` `t` ON BINARY `t` . `table_schema` = `c` . `table_schema` AND BINARY `t` . `table_name` = `c` . `table_name` WHERE `c` . `extra` = ? AND `t` . `auto_increment` IS NOT NULL" logger.go:42: 03:01:20 | monitoring/4-check-metrics | "SELECT SYSTEM_USER , `substring_index` ( HOST , ?, ... ) AS `slave_hostname` FROM `information_schema` . `processlist` WHERE `command` IN (...)" logger.go:42: 03:01:20 | monitoring/4-check-metrics | "SHOW GLOBAL STATUS" logger.go:42: 03:01:20 | monitoring/4-check-metrics | "SELECT COUNT ( * ) > ? AND MAX ( `User_name` ) != ? FROM `mysql` . `slave_master_info`" logger.go:42: 03:01:20 | monitoring/4-check-metrics | "SELECT COLUMN_NAME FROM `information_schema` . `columns` WHERE `table_schema` = ? AND TABLE_NAME = ? AND COLUMN_NAME IN (...) LIMIT ?" logger.go:42: 03:01:20 | monitoring/4-check-metrics | "SELECT `performance_schema` . `events_statements_history` . `SQL_TEXT` , `performance_schema` . `events_statements_history` . `DIGEST` , `performance_schema` . `events_statements_history` . `DIGEST_TEXT` , `performance_schema` . `events_statements_history` . `CURRENT_SCHEMA` FROM `performance_schema` . 
`events_statements_history` WHERE `DIGEST` IS NOT NULL AND `SQL_TEXT` IS NOT NULL" logger.go:42: 03:01:20 | monitoring/4-check-metrics | ++ get_service_ip monitoring-haproxy logger.go:42: 03:01:20 | monitoring/4-check-metrics | ++ local service=monitoring-haproxy logger.go:42: 03:01:20 | monitoring/4-check-metrics | ++ kubectl get service/monitoring-haproxy -n kuttl-test-perfect-hippo -o 'jsonpath={.spec.type}' logger.go:42: 03:01:20 | monitoring/4-check-metrics | ++ grep -q NotFound logger.go:42: 03:01:21 | monitoring/4-check-metrics | +++ kubectl get service/monitoring-haproxy -n kuttl-test-perfect-hippo -o 'jsonpath={.spec.type}' logger.go:42: 03:01:21 | monitoring/4-check-metrics | ++ '[' LoadBalancer = ClusterIP ']' logger.go:42: 03:01:21 | monitoring/4-check-metrics | ++ kubectl get service/monitoring-haproxy -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[]}' logger.go:42: 03:01:21 | monitoring/4-check-metrics | ++ egrep -q 'hostname|ip' logger.go:42: 03:01:21 | monitoring/4-check-metrics | ++ kubectl get service/monitoring-haproxy -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[].ip}' logger.go:42: 03:01:22 | monitoring/4-check-metrics | ++ kubectl get service/monitoring-haproxy -n kuttl-test-perfect-hippo -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' logger.go:42: 03:01:22 | monitoring/4-check-metrics | + haproxy_svc=35.239.51.223 logger.go:42: 03:01:22 | monitoring/4-check-metrics | ++ curl -s -o /dev/null -w '%{http_code}' http://35.239.51.223:8404/metrics logger.go:42: 03:01:23 | monitoring/4-check-metrics | + http_code=200 logger.go:42: 03:01:23 | monitoring/4-check-metrics | + [[ 200 != 200 ]] logger.go:42: 03:01:23 | monitoring/4-check-metrics | test step completed 4-check-metrics logger.go:42: 03:01:23 | monitoring/5-check-password-leak | starting test step 5-check-password-leak logger.go:42: 03:01:23 | monitoring/5-check-password-leak | running command: [sh -c set -o errexit set -o xtrace source ../../functions check_passwords_leak] logger.go:42: 03:01:23 | monitoring/5-check-password-leak | + source ../../functions logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ realpath ../../.. 
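Editor's note: the 4-check-metrics trace above ends with two checks that read more easily in isolation: a QAN report request against the PMM API (run from the mysql-client pod) and a plain HTTP probe of the HAProxy metrics endpoint. The sketch below condenses what the log shows; the QAN payload is trimmed to the fields that matter here, and TOKEN/NAMESPACE are the values already established in the trace.

# QAN: ask PMM for the top queries of the "monitoring" cluster over the last 12 hours
# and print their fingerprints (payload trimmed relative to the full trace).
start=$(/usr/bin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z)
end=$(/usr/bin/date -u +%Y-%m-%dT%H:%M:%S%:z)
payload="{\"columns\":[\"load\",\"num_queries\",\"query_time\"],\"group_by\":\"queryid\",
  \"labels\":[{\"key\":\"cluster\",\"value\":[\"monitoring\"]}],\"limit\":10,
  \"order_by\":\"-load\",\"main_metric\":\"load\",
  \"period_start_from\":\"${start}\",\"period_start_to\":\"${end}\"}"
kubectl -n "${NAMESPACE}" exec mysql-client -- bash -c \
  "curl -s -k -XPOST -d '${payload}' \
     -H 'Authorization: Bearer ${TOKEN}' \
     https://monitoring-service/v1/qan/metrics:getReport" \
  | jq '.rows[].fingerprint'

# HAProxy: the exporter endpoint must answer 200 on port 8404.
haproxy_svc=$(kubectl -n "${NAMESPACE}" get service/monitoring-haproxy \
  -o 'jsonpath={.status.loadBalancer.ingress[].ip}')
http_code=$(curl -s -o /dev/null -w '%{http_code}' "http://${haproxy_svc}:8404/metrics")
[[ ${http_code} == 200 ]] || { echo "Error: http code is ${http_code}"; exit 1; }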
logger.go:42: 03:01:23 | monitoring/5-check-password-leak | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-873 logger.go:42: 03:01:23 | monitoring/5-check-password-leak | ++++ pwd logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/tests/monitoring logger.go:42: 03:01:23 | monitoring/5-check-password-leak | ++ test_name=monitoring logger.go:42: 03:01:23 | monitoring/5-check-password-leak | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/vars.sh logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-873 logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-873 logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/deploy logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/deploy logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/conf logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/conf logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 03:01:23 | monitoring/5-check-password-leak | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ export GIT_BRANCH=PR-873 logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ GIT_BRANCH=PR-873 logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ export VERSION=PR-873-af2827af logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ VERSION=PR-873-af2827af logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-873-af2827af logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-873-af2827af logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:01:23 | 
monitoring/5-check-password-leak | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ export PMM_SERVER_VERSION=1.4.0 logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ PMM_SERVER_VERSION=1.4.0 logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 03:01:23 | monitoring/5-check-password-leak | ++++ which gdate logger.go:42: 03:01:23 | monitoring/5-check-password-leak | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-873/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 03:01:23 | monitoring/5-check-password-leak | ++++ which date logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ date=/usr/bin/date logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ oc get projects logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ : logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ kubectl get nodes logger.go:42: 03:01:23 | monitoring/5-check-password-leak | +++ grep '^minikube' logger.go:42: 03:01:23 | monitoring/5-check-password-leak | + check_passwords_leak logger.go:42: 03:01:23 | monitoring/5-check-password-leak | + local secrets logger.go:42: 03:01:23 | monitoring/5-check-password-leak | + local passwords logger.go:42: 03:01:23 | monitoring/5-check-password-leak | + local pods logger.go:42: 03:01:23 | monitoring/5-check-password-leak | ++ kubectl get secrets -o json logger.go:42: 03:01:23 | monitoring/5-check-password-leak | ++ jq -r '.items[].data | to_entries | .[] | select(.key | (endswith(".crt") or endswith(".key") or endswith(".pub") or endswith(".pem") or endswith(".p12") or test("namespace")) | not) | .value' logger.go:42: 03:01:23 | monitoring/5-check-password-leak | + secrets= logger.go:42: 03:01:23 | monitoring/5-check-password-leak | + passwords=' ' logger.go:42: 03:01:23 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-perfect-hippo get pods -o name logger.go:42: 03:01:23 | monitoring/5-check-password-leak | ++ awk -F / '{print $2}' logger.go:42: 03:01:24 | monitoring/5-check-password-leak | + pods='monitoring-0 logger.go:42: 03:01:24 | monitoring/5-check-password-leak | monitoring-haproxy-0 logger.go:42: 03:01:24 | monitoring/5-check-password-leak | monitoring-haproxy-1 logger.go:42: 03:01:24 | monitoring/5-check-password-leak | monitoring-haproxy-2 
logger.go:42: 03:01:24 | monitoring/5-check-password-leak | monitoring-mysql-0 logger.go:42: 03:01:24 | monitoring/5-check-password-leak | monitoring-mysql-1 logger.go:42: 03:01:24 | monitoring/5-check-password-leak | monitoring-mysql-2 logger.go:42: 03:01:24 | monitoring/5-check-password-leak | monitoring-orc-0 logger.go:42: 03:01:24 | monitoring/5-check-password-leak | monitoring-orc-1 logger.go:42: 03:01:24 | monitoring/5-check-password-leak | monitoring-orc-2 logger.go:42: 03:01:24 | monitoring/5-check-password-leak | mysql-client' logger.go:42: 03:01:24 | monitoring/5-check-password-leak | + collect_logs kuttl-test-perfect-hippo logger.go:42: 03:01:24 | monitoring/5-check-password-leak | + local containers logger.go:42: 03:01:24 | monitoring/5-check-password-leak | + local count logger.go:42: 03:01:24 | monitoring/5-check-password-leak | + NS=kuttl-test-perfect-hippo logger.go:42: 03:01:24 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 03:01:24 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-perfect-hippo get pod monitoring-0 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:24 | monitoring/5-check-password-leak | + containers=pmm logger.go:42: 03:01:24 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:24 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-0 -c pmm logger.go:42: 03:01:25 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-0-pmm.txt logger.go:42: 03:01:25 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-0-pmm.txt logger.go:42: 03:01:25 | monitoring/5-check-password-leak | + echo logger.go:42: 03:01:25 | monitoring/5-check-password-leak | logger.go:42: 03:01:25 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 03:01:25 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-perfect-hippo get pod monitoring-haproxy-0 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:25 | monitoring/5-check-password-leak | + containers='haproxy mysql-monit pmm-client' logger.go:42: 03:01:25 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:25 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-haproxy-0 -c haproxy logger.go:42: 03:01:26 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-haproxy.txt logger.go:42: 03:01:26 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-haproxy.txt logger.go:42: 03:01:26 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:26 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-haproxy-0 -c mysql-monit logger.go:42: 03:01:27 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-mysql-monit.txt logger.go:42: 03:01:27 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-mysql-monit.txt logger.go:42: 03:01:27 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:27 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-haproxy-0 -c pmm-client logger.go:42: 03:01:27 | monitoring/5-check-password-leak | + echo logs saved in: 
/tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-pmm-client.txt logger.go:42: 03:01:27 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-0-pmm-client.txt logger.go:42: 03:01:27 | monitoring/5-check-password-leak | + echo logger.go:42: 03:01:27 | monitoring/5-check-password-leak | logger.go:42: 03:01:27 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 03:01:27 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-perfect-hippo get pod monitoring-haproxy-1 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:28 | monitoring/5-check-password-leak | + containers='haproxy mysql-monit pmm-client' logger.go:42: 03:01:28 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:28 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-haproxy-1 -c haproxy logger.go:42: 03:01:29 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-haproxy.txt logger.go:42: 03:01:29 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-haproxy.txt logger.go:42: 03:01:29 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:29 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-haproxy-1 -c mysql-monit logger.go:42: 03:01:29 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-mysql-monit.txt logger.go:42: 03:01:29 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-mysql-monit.txt logger.go:42: 03:01:29 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:29 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-haproxy-1 -c pmm-client logger.go:42: 03:01:30 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-pmm-client.txt logger.go:42: 03:01:30 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-1-pmm-client.txt logger.go:42: 03:01:30 | monitoring/5-check-password-leak | + echo logger.go:42: 03:01:30 | monitoring/5-check-password-leak | logger.go:42: 03:01:30 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 03:01:30 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-perfect-hippo get pod monitoring-haproxy-2 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:31 | monitoring/5-check-password-leak | + containers='haproxy mysql-monit pmm-client' logger.go:42: 03:01:31 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:31 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-haproxy-2 -c haproxy logger.go:42: 03:01:31 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-haproxy.txt logger.go:42: 03:01:31 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-haproxy.txt logger.go:42: 03:01:31 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:31 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-haproxy-2 -c mysql-monit logger.go:42: 03:01:32 | monitoring/5-check-password-leak 
| + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-mysql-monit.txt logger.go:42: 03:01:32 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-mysql-monit.txt logger.go:42: 03:01:32 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:32 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-haproxy-2 -c pmm-client logger.go:42: 03:01:33 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-pmm-client.txt logger.go:42: 03:01:33 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-haproxy-2-pmm-client.txt logger.go:42: 03:01:33 | monitoring/5-check-password-leak | + echo logger.go:42: 03:01:33 | monitoring/5-check-password-leak | logger.go:42: 03:01:33 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 03:01:33 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-perfect-hippo get pod monitoring-mysql-0 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:33 | monitoring/5-check-password-leak | + containers='mysql xtrabackup pt-heartbeat pmm-client' logger.go:42: 03:01:33 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:33 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-mysql-0 -c mysql logger.go:42: 03:01:34 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-mysql.txt logger.go:42: 03:01:34 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-mysql.txt logger.go:42: 03:01:34 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:34 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-mysql-0 -c xtrabackup logger.go:42: 03:01:34 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-xtrabackup.txt logger.go:42: 03:01:34 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-xtrabackup.txt logger.go:42: 03:01:34 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:34 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-mysql-0 -c pt-heartbeat logger.go:42: 03:01:35 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-pt-heartbeat.txt logger.go:42: 03:01:35 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-pt-heartbeat.txt logger.go:42: 03:01:35 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:35 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-mysql-0 -c pmm-client logger.go:42: 03:01:36 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-pmm-client.txt logger.go:42: 03:01:36 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-0-pmm-client.txt logger.go:42: 03:01:36 | monitoring/5-check-password-leak | + echo logger.go:42: 03:01:36 | monitoring/5-check-password-leak | logger.go:42: 03:01:36 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 
03:01:36 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-perfect-hippo get pod monitoring-mysql-1 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:36 | monitoring/5-check-password-leak | + containers='mysql xtrabackup pt-heartbeat pmm-client' logger.go:42: 03:01:36 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:36 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-mysql-1 -c mysql logger.go:42: 03:01:37 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-mysql.txt logger.go:42: 03:01:37 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-mysql.txt logger.go:42: 03:01:37 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:37 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-mysql-1 -c xtrabackup logger.go:42: 03:01:37 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-xtrabackup.txt logger.go:42: 03:01:37 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-xtrabackup.txt logger.go:42: 03:01:37 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:37 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-mysql-1 -c pt-heartbeat logger.go:42: 03:01:38 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-pt-heartbeat.txt logger.go:42: 03:01:38 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-pt-heartbeat.txt logger.go:42: 03:01:38 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:38 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-mysql-1 -c pmm-client logger.go:42: 03:01:39 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-pmm-client.txt logger.go:42: 03:01:39 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-1-pmm-client.txt logger.go:42: 03:01:39 | monitoring/5-check-password-leak | + echo logger.go:42: 03:01:39 | monitoring/5-check-password-leak | logger.go:42: 03:01:39 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 03:01:39 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-perfect-hippo get pod monitoring-mysql-2 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:39 | monitoring/5-check-password-leak | + containers='mysql xtrabackup pt-heartbeat pmm-client' logger.go:42: 03:01:39 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:39 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-mysql-2 -c mysql logger.go:42: 03:01:40 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-mysql.txt logger.go:42: 03:01:40 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-mysql.txt logger.go:42: 03:01:40 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:40 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-mysql-2 -c xtrabackup 
logger.go:42: 03:01:40 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-xtrabackup.txt logger.go:42: 03:01:40 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-xtrabackup.txt logger.go:42: 03:01:40 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:40 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-mysql-2 -c pt-heartbeat logger.go:42: 03:01:41 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-pt-heartbeat.txt logger.go:42: 03:01:41 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-pt-heartbeat.txt logger.go:42: 03:01:41 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:41 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-mysql-2 -c pmm-client logger.go:42: 03:01:42 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-pmm-client.txt logger.go:42: 03:01:42 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-mysql-2-pmm-client.txt logger.go:42: 03:01:42 | monitoring/5-check-password-leak | + echo logger.go:42: 03:01:42 | monitoring/5-check-password-leak | logger.go:42: 03:01:42 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 03:01:42 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-perfect-hippo get pod monitoring-orc-0 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:42 | monitoring/5-check-password-leak | + containers='orc mysql-monit' logger.go:42: 03:01:42 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:42 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-orc-0 -c orc logger.go:42: 03:01:43 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-0-orc.txt logger.go:42: 03:01:43 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-0-orc.txt logger.go:42: 03:01:43 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:43 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-orc-0 -c mysql-monit logger.go:42: 03:01:44 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-0-mysql-monit.txt logger.go:42: 03:01:44 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-0-mysql-monit.txt logger.go:42: 03:01:44 | monitoring/5-check-password-leak | + echo logger.go:42: 03:01:44 | monitoring/5-check-password-leak | logger.go:42: 03:01:44 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 03:01:44 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-perfect-hippo get pod monitoring-orc-1 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:44 | monitoring/5-check-password-leak | + containers='orc mysql-monit' logger.go:42: 03:01:44 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:44 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-orc-1 -c orc logger.go:42: 03:01:45 | monitoring/5-check-password-leak | + 
echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-1-orc.txt logger.go:42: 03:01:45 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-1-orc.txt logger.go:42: 03:01:45 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:45 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-orc-1 -c mysql-monit logger.go:42: 03:01:46 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-1-mysql-monit.txt logger.go:42: 03:01:46 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-1-mysql-monit.txt logger.go:42: 03:01:46 | monitoring/5-check-password-leak | + echo logger.go:42: 03:01:46 | monitoring/5-check-password-leak | logger.go:42: 03:01:46 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 03:01:46 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-perfect-hippo get pod monitoring-orc-2 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:46 | monitoring/5-check-password-leak | + containers='orc mysql-monit' logger.go:42: 03:01:46 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:46 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-orc-2 -c orc logger.go:42: 03:01:47 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-2-orc.txt logger.go:42: 03:01:47 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-2-orc.txt logger.go:42: 03:01:47 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:47 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs monitoring-orc-2 -c mysql-monit logger.go:42: 03:01:48 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-2-mysql-monit.txt logger.go:42: 03:01:48 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-monitoring-orc-2-mysql-monit.txt logger.go:42: 03:01:48 | monitoring/5-check-password-leak | + echo logger.go:42: 03:01:48 | monitoring/5-check-password-leak | logger.go:42: 03:01:48 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 03:01:48 | monitoring/5-check-password-leak | ++ kubectl -n kuttl-test-perfect-hippo get pod mysql-client -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:48 | monitoring/5-check-password-leak | + containers=mysql-client logger.go:42: 03:01:48 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:48 | monitoring/5-check-password-leak | + kubectl -n kuttl-test-perfect-hippo logs mysql-client -c mysql-client logger.go:42: 03:01:48 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-mysql-client-mysql-client.txt logger.go:42: 03:01:48 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-mysql-client-mysql-client.txt logger.go:42: 03:01:48 | monitoring/5-check-password-leak | + echo logger.go:42: 03:01:48 | monitoring/5-check-password-leak | logger.go:42: 03:01:48 | monitoring/5-check-password-leak | + '[' -n ps-operator ']' logger.go:42: 03:01:48 | monitoring/5-check-password-leak | ++ kubectl -n ps-operator get pods -o name logger.go:42: 03:01:48 | monitoring/5-check-password-leak | ++ awk -F 
/ '{print $2}' logger.go:42: 03:01:49 | monitoring/5-check-password-leak | + pods=percona-server-mysql-operator-5bfb78b44c-2hwqh logger.go:42: 03:01:49 | monitoring/5-check-password-leak | + collect_logs ps-operator logger.go:42: 03:01:49 | monitoring/5-check-password-leak | + local containers logger.go:42: 03:01:49 | monitoring/5-check-password-leak | + local count logger.go:42: 03:01:49 | monitoring/5-check-password-leak | + NS=ps-operator logger.go:42: 03:01:49 | monitoring/5-check-password-leak | + for p in '$pods' logger.go:42: 03:01:49 | monitoring/5-check-password-leak | ++ kubectl -n ps-operator get pod percona-server-mysql-operator-5bfb78b44c-2hwqh -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:49 | monitoring/5-check-password-leak | + containers=manager logger.go:42: 03:01:49 | monitoring/5-check-password-leak | + for c in '$containers' logger.go:42: 03:01:49 | monitoring/5-check-password-leak | + kubectl -n ps-operator logs percona-server-mysql-operator-5bfb78b44c-2hwqh -c manager logger.go:42: 03:01:50 | monitoring/5-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/monitoring/logs_output-percona-server-mysql-operator-5bfb78b44c-2hwqh-manager.txt logger.go:42: 03:01:50 | monitoring/5-check-password-leak | logs saved in: /tmp/kuttl/ps/monitoring/logs_output-percona-server-mysql-operator-5bfb78b44c-2hwqh-manager.txt logger.go:42: 03:01:50 | monitoring/5-check-password-leak | + echo logger.go:42: 03:01:50 | monitoring/5-check-password-leak | logger.go:42: 03:01:50 | monitoring/5-check-password-leak | test step completed 5-check-password-leak logger.go:42: 03:01:50 | monitoring/98-drop-finalizer | starting test step 98-drop-finalizer logger.go:42: 03:01:51 | monitoring/98-drop-finalizer | PerconaServerMySQL:kuttl-test-perfect-hippo/monitoring updated logger.go:42: 03:01:51 | monitoring/98-drop-finalizer | test step completed 98-drop-finalizer logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | starting test step 99-remove-cluster-gracefully logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_operator] logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | + source ../../functions logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ realpath ../../.. 
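The 5-check-password-leak step above runs the same per-pod, per-container loop for every workload in the test namespace and then again for the operator pod in ps-operator. A minimal bash sketch of what the traced collect_logs helper appears to do follows; the loop structure, the jsonpath query and the output file naming are taken from the trace, while the actual leak check (grepping the saved files for secret values) is not visible in this excerpt and is only noted as a comment.

    # Sketch of the traced log-collection loop (assumed shape, not the
    # repository's exact collect_logs implementation).
    collect_logs() {
        local NS="$1"
        local pods containers
        pods=$(kubectl -n "$NS" get pods -o name | awk -F / '{print $2}')
        for p in $pods; do
            # container names are read from the pod spec, as in the trace
            containers=$(kubectl -n "$NS" get pod "$p" -o 'jsonpath={.spec.containers[*].name}')
            for c in $containers; do
                kubectl -n "$NS" logs "$p" -c "$c" > "${TEMP_DIR}/logs_output-${p}-${c}.txt"
                echo "logs saved in: ${TEMP_DIR}/logs_output-${p}-${c}.txt"
                # the password-leak grep over the saved file would go here;
                # it is not shown in this part of the trace
            done
            echo
        done
    }
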
logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-873 logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | ++++ pwd logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/tests/monitoring logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | ++ test_name=monitoring logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/vars.sh logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-873 logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-873 logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/deploy logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/deploy logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/conf logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-873/e2e-tests/conf logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ export GIT_BRANCH=PR-873 logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ GIT_BRANCH=PR-873 logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ export VERSION=PR-873-af2827af logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ VERSION=PR-873-af2827af logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-873-af2827af logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-873-af2827af logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ 
IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ export PMM_SERVER_VERSION=1.4.0 logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ PMM_SERVER_VERSION=1.4.0 logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ export CERT_MANAGER_VER=1.16.3 logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ CERT_MANAGER_VER=1.16.3 logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | ++++ which gdate logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-873/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | ++++ which date logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ date=/usr/bin/date logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ oc get projects logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ : logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ kubectl get nodes logger.go:42: 03:01:51 | monitoring/99-remove-cluster-gracefully | +++ grep '^minikube' logger.go:42: 03:01:52 | monitoring/99-remove-cluster-gracefully | + destroy_operator logger.go:42: 03:01:52 | monitoring/99-remove-cluster-gracefully | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0 logger.go:42: 03:01:52 | monitoring/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
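The destroy_operator call traced here force-deletes the operator Deployment and, as the lines below show, the ps-operator namespace as well, which is why kubectl prints the "Immediate deletion does not wait for confirmation" warning twice. A sketch reconstructed from those two commands (the OPERATOR_NS variable name is an assumption made for illustration):

    # Reconstructed from the two kubectl commands in the trace.
    destroy_operator() {
        kubectl -n "${OPERATOR_NS}" delete deployment percona-server-mysql-operator \
            --force --grace-period=0
        # the namespace is removed only when OPERATOR_NS is set, matching the
        # [[ -n ps-operator ]] check visible in the trace
        if [[ -n "${OPERATOR_NS}" ]]; then
            kubectl delete namespace "${OPERATOR_NS}" --force --grace-period=0
        fi
    }
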
logger.go:42: 03:01:52 | monitoring/99-remove-cluster-gracefully | deployment.apps "percona-server-mysql-operator" force deleted logger.go:42: 03:01:52 | monitoring/99-remove-cluster-gracefully | + [[ -n ps-operator ]] logger.go:42: 03:01:52 | monitoring/99-remove-cluster-gracefully | + kubectl delete namespace ps-operator --force --grace-period=0 logger.go:42: 03:01:52 | monitoring/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 03:01:53 | monitoring/99-remove-cluster-gracefully | namespace "ps-operator" force deleted logger.go:42: 03:01:58 | monitoring/99-remove-cluster-gracefully | test step completed 99-remove-cluster-gracefully logger.go:42: 03:01:59 | monitoring | monitoring events from ns kuttl-test-perfect-hippo: logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:49:24 +0000 UTC Normal Pod mysql-client Binding Scheduled Successfully assigned kuttl-test-perfect-hippo/mysql-client to gke-jen-ps-873-af2827af--default-pool-fc15a126-bfzn default-scheduler logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:49:24 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulling Pulling image "percona/percona-server:8.0.33" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:49:42 +0000 UTC Normal Service monitoring-service EnsuringLoadBalancer Ensuring load balancer service-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:49:42 +0000 UTC Normal StatefulSet.apps monitoring SuccessfulCreate create Claim pmm-storage-monitoring-0 Pod monitoring-0 in StatefulSet monitoring success statefulset-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:49:42 +0000 UTC Normal StatefulSet.apps monitoring SuccessfulCreate create Pod monitoring-0 in StatefulSet monitoring successful statefulset-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:49:42 +0000 UTC Normal PersistentVolumeClaim pmm-storage-monitoring-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:49:42 +0000 UTC Normal PersistentVolumeClaim pmm-storage-monitoring-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-perfect-hippo/pmm-storage-monitoring-0" pd.csi.storage.gke.io_gke-cc42a553fa8245b69da1-ef39-6e0d-vm_72dada09-86a5-443a-98ea-f213eab8b957 logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:49:42 +0000 UTC Normal PersistentVolumeClaim pmm-storage-monitoring-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:49:45 +0000 UTC Normal PersistentVolumeClaim pmm-storage-monitoring-0 ProvisioningSucceeded Successfully provisioned volume pvc-630b2d5b-1c1b-40e2-b1db-f72193d5c042 pd.csi.storage.gke.io_gke-cc42a553fa8245b69da1-ef39-6e0d-vm_72dada09-86a5-443a-98ea-f213eab8b957 logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:49:46 +0000 UTC Normal Pod monitoring-0 Binding Scheduled Successfully assigned kuttl-test-perfect-hippo/monitoring-0 to gke-jen-ps-873-af2827af--default-pool-fc15a126-bfzn default-scheduler logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:49:50 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Successfully pulled image "percona/percona-server:8.0.33" in 25.657s (25.657s including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:49:50 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container: mysql-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:49:50 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:49:53 +0000 UTC Normal Pod monitoring-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-630b2d5b-1c1b-40e2-b1db-f72193d5c042" attachdetach-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:49:55 +0000 UTC Normal Pod monitoring-0.spec.containers{pmm} Pulling Pulling image "percona/pmm-server:3.0.0" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:50:21 +0000 UTC Normal Service monitoring-service EnsuredLoadBalancer Ensured load balancer service-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:50:43 +0000 UTC Normal Pod monitoring-0.spec.containers{pmm} Pulled Successfully pulled image "percona/pmm-server:3.0.0" in 47.688s (47.688s including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:50:43 +0000 UTC Normal Pod monitoring-0.spec.containers{pmm} Created Created container: pmm kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:50:43 +0000 UTC Normal Pod monitoring-0.spec.containers{pmm} Started Started container pmm kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:50:44 +0000 UTC Warning Pod monitoring-0.spec.containers{pmm} Unhealthy Readiness probe failed: Get "http://10.172.34.12:8080/v1/readyz": dial tcp 10.172.34.12:8080: connect: connection refused kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:50:50 +0000 UTC Warning Pod monitoring-0.spec.containers{pmm} Unhealthy Readiness probe failed: HTTP probe failed with statuscode: 500 kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:50:55 +0000 UTC Warning Pod monitoring-0.spec.containers{pmm} Unhealthy Readiness probe failed: HTTP probe failed with statuscode: 503 kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:23 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:23 +0000 UTC Normal Service monitoring-haproxy EnsuringLoadBalancer Ensuring load balancer service-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:23 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Claim datadir-monitoring-mysql-0 Pod monitoring-mysql-0 in StatefulSet monitoring-mysql success statefulset-controller logger.go:42: 03:01:59 | 
monitoring | 2025-03-26 02:51:24 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-perfect-hippo/datadir-monitoring-mysql-0" pd.csi.storage.gke.io_gke-cc42a553fa8245b69da1-ef39-6e0d-vm_72dada09-86a5-443a-98ea-f213eab8b957 logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:24 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:24 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Pod monitoring-mysql-0 in StatefulSet monitoring-mysql successful statefulset-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:24 +0000 UTC Normal Pod monitoring-orc-0 Binding Scheduled Successfully assigned kuttl-test-perfect-hippo/monitoring-orc-0 to gke-jen-ps-873-af2827af--default-pool-fc15a126-nrth default-scheduler logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:24 +0000 UTC Normal StatefulSet.apps monitoring-orc SuccessfulCreate create Pod monitoring-orc-0 in StatefulSet monitoring-orc successful statefulset-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:25 +0000 UTC Normal Pod monitoring-orc-0.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-873-af2827af" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:25 +0000 UTC Normal Pod monitoring-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-873-af2827af" in 168ms (168ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:25 +0000 UTC Normal Pod monitoring-orc-0.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:25 +0000 UTC Normal Pod monitoring-orc-0.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:26 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:26 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 148ms (148ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:26 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:26 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orc} Started Started container orc kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:26 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:27 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-2c9d46b9-4d8f-42d6-bb7f-f4b8b77c4ff6 pd.csi.storage.gke.io_gke-cc42a553fa8245b69da1-ef39-6e0d-vm_72dada09-86a5-443a-98ea-f213eab8b957 logger.go:42: 
03:01:59 | monitoring | 2025-03-26 02:51:27 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 127ms (127ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:27 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:27 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:28 +0000 UTC Normal Pod monitoring-mysql-0 Binding Scheduled Successfully assigned kuttl-test-perfect-hippo/monitoring-mysql-0 to gke-jen-ps-873-af2827af--default-pool-fc15a126-bfzn default-scheduler logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:35 +0000 UTC Normal Pod monitoring-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-2c9d46b9-4d8f-42d6-bb7f-f4b8b77c4ff6" attachdetach-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:37 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-873-af2827af" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:37 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-873-af2827af" in 165ms (165ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:37 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:37 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:39 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:39 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 112ms (112ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:39 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:39 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:39 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:39 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 133ms (133ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:39 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:39 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 
02:51:39 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:40 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 114ms (114ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:40 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:40 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:40 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:50 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 10.242s (10.242s including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:50 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Created Created container: pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:50 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:51:52 +0000 UTC Normal Service monitoring-haproxy EnsuredLoadBalancer Ensured load balancer service-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:00 +0000 UTC Normal Pod monitoring-orc-1 Binding Scheduled Successfully assigned kuttl-test-perfect-hippo/monitoring-orc-1 to gke-jen-ps-873-af2827af--default-pool-fc15a126-727k default-scheduler logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:00 +0000 UTC Normal Pod monitoring-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-873-af2827af" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:00 +0000 UTC Normal Pod monitoring-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-873-af2827af" in 218ms (218ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:00 +0000 UTC Normal StatefulSet.apps monitoring-orc SuccessfulCreate create Pod monitoring-orc-1 in StatefulSet monitoring-orc successful statefulset-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:01 +0000 UTC Normal Pod monitoring-orc-1.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:01 +0000 UTC Normal Pod monitoring-orc-1.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:02 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:03 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 150ms (150ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:03 +0000 UTC Normal Pod 
monitoring-orc-1.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:03 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orc} Started Started container orc kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:03 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:03 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 146ms (146ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:03 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:03 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:12 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:12 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:12 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-perfect-hippo/datadir-monitoring-mysql-1" pd.csi.storage.gke.io_gke-cc42a553fa8245b69da1-ef39-6e0d-vm_72dada09-86a5-443a-98ea-f213eab8b957 logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:12 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Claim datadir-monitoring-mysql-1 Pod monitoring-mysql-1 in StatefulSet monitoring-mysql success statefulset-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:12 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Pod monitoring-mysql-1 in StatefulSet monitoring-mysql successful statefulset-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:16 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-a32cc708-61e5-4986-8bb4-d368e5888d76 pd.csi.storage.gke.io_gke-cc42a553fa8245b69da1-ef39-6e0d-vm_72dada09-86a5-443a-98ea-f213eab8b957 logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:16 +0000 UTC Normal Pod monitoring-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-perfect-hippo/monitoring-haproxy-0 to gke-jen-ps-873-af2827af--default-pool-fc15a126-727k default-scheduler logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:16 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulCreate create Pod monitoring-haproxy-0 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:17 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-873-af2827af" kubelet logger.go:42: 
03:01:59 | monitoring | 2025-03-26 02:52:17 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-873-af2827af" in 258ms (258ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:17 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:17 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:17 +0000 UTC Normal Pod monitoring-mysql-1 Binding Scheduled Successfully assigned kuttl-test-perfect-hippo/monitoring-mysql-1 to gke-jen-ps-873-af2827af--default-pool-fc15a126-nrth default-scheduler logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:19 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 155ms (155ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 151ms (151ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:25 +0000 UTC Normal Pod monitoring-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-a32cc708-61e5-4986-8bb4-d368e5888d76" attachdetach-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:28 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-873-af2827af" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:28 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-873-af2827af" in 183ms (183ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:28 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Created 
Created container: mysql-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:28 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:29 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 8.977s (8.977s including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:29 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Created Created container: pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:29 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:29 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:30 +0000 UTC Normal Pod monitoring-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-perfect-hippo/monitoring-haproxy-1 to gke-jen-ps-873-af2827af--default-pool-fc15a126-bfzn default-scheduler logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:30 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-873-af2827af" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:30 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-873-af2827af" in 170ms (170ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:30 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:30 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:30 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulCreate create Pod monitoring-haproxy-1 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:30 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 120ms (120ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:30 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:30 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:30 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:30 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 115ms (115ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:30 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Created Created container: 
xtrabackup kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:30 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:30 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:30 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 109ms (109ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:30 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:30 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:30 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:32 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:32 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 109ms (109ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:32 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:32 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:32 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:32 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 116ms (116ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:32 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:33 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:33 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:33 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 197ms (197ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:33 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Created Created container: pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:33 +0000 UTC Normal Pod 
monitoring-haproxy-1.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:33 +0000 UTC Normal Pod monitoring-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-perfect-hippo/monitoring-haproxy-2 to gke-jen-ps-873-af2827af--default-pool-fc15a126-nrth default-scheduler logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:33 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulCreate create Pod monitoring-haproxy-2 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:35 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-873-af2827af" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:35 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-873-af2827af" in 197ms (197ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:35 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:35 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:35 +0000 UTC Normal Pod monitoring-orc-2 Binding Scheduled Successfully assigned kuttl-test-perfect-hippo/monitoring-orc-2 to gke-jen-ps-873-af2827af--default-pool-fc15a126-bfzn default-scheduler logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:35 +0000 UTC Normal Pod monitoring-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-873-af2827af" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:35 +0000 UTC Normal StatefulSet.apps monitoring-orc SuccessfulCreate create Pod monitoring-orc-2 in StatefulSet monitoring-orc successful statefulset-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:36 +0000 UTC Normal Pod monitoring-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-873-af2827af" in 170ms (170ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:36 +0000 UTC Normal Pod monitoring-orc-2.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:36 +0000 UTC Normal Pod monitoring-orc-2.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:38 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:38 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:38 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 127ms (128ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:38 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orc} Created Created container: orc kubelet 
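The surrounding lines are the harness's event dump for the kuttl-test-perfect-hippo namespace, printed in chronological order once the test finishes. A comparable listing can be produced manually while the namespace still exists; a small sketch:

    # Sort events the same way the dump above reads: oldest first.
    kubectl -n kuttl-test-perfect-hippo get events --sort-by=.metadata.creationTimestamp
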
logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:38 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orc} Started Started container orc kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:38 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:39 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 127ms (127ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:39 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:39 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:39 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:39 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 123ms (123ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:39 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:39 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:39 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:39 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 151ms (151ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:39 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:39 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:42 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 3.432s (3.432s including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:42 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 12.363s (12.363s including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:42 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Created Created container: pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:43 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Created Created container: pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:43 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} 
Started Started container pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:43 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:49 +0000 UTC Warning Pod monitoring-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/03/26 02:52:47 Waiting for MySQL ready state 2025/03/26 02:52:47 MySQL is ready 2025/03/26 02:52:47 Peers: [3261623963323164.monitoring-mysql-unready.kuttl-test-perfect-hippo 3364343431373831.monitoring-mysql-unready.kuttl-test-perfect-hippo] 2025/03/26 02:52:47 FQDN: monitoring-mysql-1.monitoring-mysql.kuttl-test-perfect-hippo 2025/03/26 02:52:48 Primary: monitoring-mysql-0.monitoring-mysql.kuttl-test-perfect-hippo Replicas: [monitoring-mysql-1.monitoring-mysql.kuttl-test-perfect-hippo] 2025/03/26 02:52:48 lookup monitoring-mysql-1 [10.172.32.24] 2025/03/26 02:52:48 PodIP: 10.172.32.24 2025/03/26 02:52:48 lookup monitoring-mysql-0.monitoring-mysql.kuttl-test-perfect-hippo [10.172.34.13] 2025/03/26 02:52:48 PrimaryIP: 10.172.34.13 2025/03/26 02:52:48 Donor: monitoring-mysql-0.monitoring-mysql.kuttl-test-perfect-hippo 2025/03/26 02:52:48 Opening connection to 10.172.32.24 2025/03/26 02:52:48 Clone required: true 2025/03/26 02:52:48 Checking if a clone in progress 2025/03/26 02:52:48 Clone in progress: false 2025/03/26 02:52:48 Cloning from monitoring-mysql-0.monitoring-mysql.kuttl-test-perfect-hippo 2025/03/26 02:52:49 Clone finished. Restarting container... kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:49 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:52:54 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 141ms (141ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:28 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:28 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:28 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-perfect-hippo/datadir-monitoring-mysql-2" pd.csi.storage.gke.io_gke-cc42a553fa8245b69da1-ef39-6e0d-vm_72dada09-86a5-443a-98ea-f213eab8b957 logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:28 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Claim datadir-monitoring-mysql-2 Pod monitoring-mysql-2 in StatefulSet monitoring-mysql success statefulset-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:28 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Pod monitoring-mysql-2 in StatefulSet monitoring-mysql successful statefulset-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:31 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-93066a16-707b-4786-a714-4700c0e77dea pd.csi.storage.gke.io_gke-cc42a553fa8245b69da1-ef39-6e0d-vm_72dada09-86a5-443a-98ea-f213eab8b957 logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:32 +0000 UTC Normal Pod monitoring-mysql-2 Binding Scheduled Successfully assigned kuttl-test-perfect-hippo/monitoring-mysql-2 to gke-jen-ps-873-af2827af--default-pool-fc15a126-727k default-scheduler logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:39 +0000 UTC Normal Pod monitoring-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-93066a16-707b-4786-a714-4700c0e77dea" attachdetach-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:41 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-873-af2827af" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:41 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-873-af2827af" in 215ms (215ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:41 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:41 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:43 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:43 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 120ms (120ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:43 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:43 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:43 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:43 +0000 UTC Normal Pod 
monitoring-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 159ms (159ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:43 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:43 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:43 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:43 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 132ms (132ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:43 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:44 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:44 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:44 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 345ms (346ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:44 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Created Created container: pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:53:44 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:02 +0000 UTC Warning Pod monitoring-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/03/26 02:54:01 Waiting for MySQL ready state 2025/03/26 02:54:01 MySQL is ready 2025/03/26 02:54:01 Peers: [3261623963323164.monitoring-mysql-unready.kuttl-test-perfect-hippo 3332616237393230.monitoring-mysql-unready.kuttl-test-perfect-hippo 3364343431373831.monitoring-mysql-unready.kuttl-test-perfect-hippo] 2025/03/26 02:54:01 FQDN: monitoring-mysql-2.monitoring-mysql.kuttl-test-perfect-hippo 2025/03/26 02:54:01 Primary: monitoring-mysql-0.monitoring-mysql.kuttl-test-perfect-hippo Replicas: [monitoring-mysql-1.monitoring-mysql.kuttl-test-perfect-hippo monitoring-mysql-2.monitoring-mysql.kuttl-test-perfect-hippo] 2025/03/26 02:54:01 lookup monitoring-mysql-2 [10.172.33.26] 2025/03/26 02:54:01 PodIP: 10.172.33.26 2025/03/26 02:54:01 lookup monitoring-mysql-0.monitoring-mysql.kuttl-test-perfect-hippo [10.172.34.13] 2025/03/26 02:54:01 PrimaryIP: 10.172.34.13 2025/03/26 02:54:01 Donor: monitoring-mysql-1.monitoring-mysql.kuttl-test-perfect-hippo 2025/03/26 02:54:01 Opening connection to 10.172.33.26 2025/03/26 02:54:01 Clone required: true 2025/03/26 02:54:01 Checking if a clone in progress 2025/03/26 02:54:01 Clone in progress: false 2025/03/26 02:54:01 Cloning from monitoring-mysql-1.monitoring-mysql.kuttl-test-perfect-hippo 2025/03/26 02:54:02 Clone 
finished. Restarting container... kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:02 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:51 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:51 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:51 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:51 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulDelete delete Pod monitoring-haproxy-2 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:52 +0000 UTC Normal Pod monitoring-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-perfect-hippo/monitoring-haproxy-2 to gke-jen-ps-873-af2827af--default-pool-fc15a126-nrth default-scheduler logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:53 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-873-af2827af" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:53 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-873-af2827af" in 152ms (152ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:53 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:53 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:55 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:55 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 133ms (133ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:55 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:55 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:55 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:55 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 119ms (119ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:55 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet 
logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:55 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:55 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:56 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:56 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:56 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:56 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 194ms (194ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:56 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Created Created container: pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:56 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:56 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulDelete delete Pod monitoring-haproxy-1 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:57 +0000 UTC Normal Pod monitoring-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-perfect-hippo/monitoring-haproxy-1 to gke-jen-ps-873-af2827af--default-pool-fc15a126-bfzn default-scheduler logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:57 +0000 UTC Normal Pod monitoring-mysql-1 Binding Scheduled Successfully assigned kuttl-test-perfect-hippo/monitoring-mysql-1 to gke-jen-ps-873-af2827af--default-pool-fc15a126-nrth default-scheduler logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:58 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-873-af2827af" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:58 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-873-af2827af" in 191ms (191ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:58 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:59 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:59 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-873-af2827af" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:59 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-873-af2827af" in 172ms (172ms including waiting) 
kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:59 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:54:59 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:01 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:01 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 137ms (137ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:01 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:01 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:01 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:01 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 125ms (125ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:01 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:01 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:01 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:01 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 200ms (200ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:01 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Created Created container: pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:01 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:01 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:01 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 122ms (122ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:01 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:01 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Started Started container mysql kubelet 
logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:01 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:01 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 123ms (123ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:01 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:01 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:01 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:02 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:02 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:02 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:02 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulDelete delete Pod monitoring-haproxy-0 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:02 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 116ms (116ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:02 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:02 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:02 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:02 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 217ms (217ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:02 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Created Created container: pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:02 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:11 +0000 UTC Normal Pod monitoring-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-perfect-hippo/monitoring-haproxy-0 to gke-jen-ps-873-af2827af--default-pool-fc15a126-727k default-scheduler logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:12 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} 
Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-873-af2827af" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:12 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-873-af2827af" in 194ms (194ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:12 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:12 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:14 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:14 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 140ms (140ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:14 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:14 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:14 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:15 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 151ms (151ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:15 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:15 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:15 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:15 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 266ms (266ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:15 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Created Created container: pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:15 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:20 +0000 UTC Warning Pod monitoring-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/03/26 02:55:18 Waiting for MySQL ready state 2025/03/26 02:55:18 MySQL is ready 2025/03/26 02:55:18 Peers: [3164643061333261.monitoring-mysql-unready.kuttl-test-perfect-hippo 3261623963323164.monitoring-mysql-unready.kuttl-test-perfect-hippo 
3332616237393230.monitoring-mysql-unready.kuttl-test-perfect-hippo] 2025/03/26 02:55:18 FQDN: monitoring-mysql-1.monitoring-mysql.kuttl-test-perfect-hippo 2025/03/26 02:55:18 Primary: monitoring-mysql-0.monitoring-mysql.kuttl-test-perfect-hippo Replicas: [monitoring-mysql-1.monitoring-mysql.kuttl-test-perfect-hippo monitoring-mysql-2.monitoring-mysql.kuttl-test-perfect-hippo] 2025/03/26 02:55:18 lookup monitoring-mysql-1 [10.172.32.27] 2025/03/26 02:55:18 PodIP: 10.172.32.27 2025/03/26 02:55:18 lookup monitoring-mysql-0.monitoring-mysql.kuttl-test-perfect-hippo [10.172.34.13] 2025/03/26 02:55:18 PrimaryIP: 10.172.34.13 2025/03/26 02:55:19 Donor: monitoring-mysql-2.monitoring-mysql.kuttl-test-perfect-hippo 2025/03/26 02:55:19 Opening connection to 10.172.32.27 2025/03/26 02:55:19 Clone required: true 2025/03/26 02:55:19 Checking if a clone in progress 2025/03/26 02:55:19 Clone in progress: false 2025/03/26 02:55:19 Cloning from monitoring-mysql-2.monitoring-mysql.kuttl-test-perfect-hippo 2025/03/26 02:55:20 Clone finished. Restarting container... kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:20 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:55:23 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 130ms (131ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:03 +0000 UTC Normal Pod monitoring-mysql-2 Binding Scheduled Successfully assigned kuttl-test-perfect-hippo/monitoring-mysql-2 to gke-jen-ps-873-af2827af--default-pool-fc15a126-727k default-scheduler logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:04 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-873-af2827af" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:04 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-873-af2827af" in 228ms (228ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:04 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:05 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:06 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:06 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 132ms (132ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:07 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:07 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:07 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Pulling Pulling image 
"perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:07 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 148ms (148ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:07 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:07 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:07 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:07 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 136ms (136ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:07 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:07 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:07 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:07 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 245ms (245ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:07 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Created Created container: pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:08 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:25 +0000 UTC Warning Pod monitoring-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/03/26 02:56:24 Waiting for MySQL ready state 2025/03/26 02:56:24 MySQL is ready 2025/03/26 02:56:24 Peers: [3164643061333261.monitoring-mysql-unready.kuttl-test-perfect-hippo 3261623963323164.monitoring-mysql-unready.kuttl-test-perfect-hippo 6635613764653336.monitoring-mysql-unready.kuttl-test-perfect-hippo] 2025/03/26 02:56:24 FQDN: monitoring-mysql-2.monitoring-mysql.kuttl-test-perfect-hippo 2025/03/26 02:56:24 Primary: monitoring-mysql-0.monitoring-mysql.kuttl-test-perfect-hippo Replicas: [monitoring-mysql-1.monitoring-mysql.kuttl-test-perfect-hippo monitoring-mysql-2.monitoring-mysql.kuttl-test-perfect-hippo] 2025/03/26 02:56:24 lookup monitoring-mysql-2 [10.172.33.28] 2025/03/26 02:56:24 PodIP: 10.172.33.28 2025/03/26 02:56:24 lookup monitoring-mysql-0.monitoring-mysql.kuttl-test-perfect-hippo [10.172.34.13] 2025/03/26 02:56:24 PrimaryIP: 10.172.34.13 2025/03/26 02:56:24 Donor: monitoring-mysql-1.monitoring-mysql.kuttl-test-perfect-hippo 2025/03/26 02:56:24 Opening connection to 10.172.33.28 2025/03/26 02:56:24 Clone required: true 2025/03/26 02:56:24 Checking if a clone in progress 2025/03/26 02:56:24 Clone 
in progress: false 2025/03/26 02:56:24 Cloning from monitoring-mysql-1.monitoring-mysql.kuttl-test-perfect-hippo 2025/03/26 02:56:25 Clone finished. Restarting container... kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:25 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:56:28 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 159ms (159ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:01 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:01 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:01 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:01 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:02 +0000 UTC Warning Pod monitoring-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/03/26 02:57:02 MySQL state is not ready... kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:07 +0000 UTC Warning Pod monitoring-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/03/26 02:57:07 MySQL state is not ready... kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:21 +0000 UTC Normal Pod monitoring-mysql-0 Binding Scheduled Successfully assigned kuttl-test-perfect-hippo/monitoring-mysql-0 to gke-jen-ps-873-af2827af--default-pool-fc15a126-bfzn default-scheduler logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:23 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-873-af2827af" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:23 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-873-af2827af" in 190ms (190ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:23 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:23 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:25 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:25 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 129ms (129ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:25 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:25 +0000 UTC Normal Pod 
monitoring-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:25 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:26 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 134ms (134ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:26 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:26 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:26 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:26 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 122ms (122ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:26 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:26 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:26 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:26 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 195ms (195ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:26 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Created Created container: pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:26 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:44 +0000 UTC Warning Pod monitoring-mysql-0.spec.containers{mysql} Unhealthy Startup probe failed: 2025/03/26 02:57:43 Waiting for MySQL ready state 2025/03/26 02:57:43 MySQL is ready 2025/03/26 02:57:43 Peers: [3164643061333261.monitoring-mysql-unready.kuttl-test-perfect-hippo 3765616264656231.monitoring-mysql-unready.kuttl-test-perfect-hippo 6635613764653336.monitoring-mysql-unready.kuttl-test-perfect-hippo] 2025/03/26 02:57:43 FQDN: monitoring-mysql-0.monitoring-mysql.kuttl-test-perfect-hippo 2025/03/26 02:57:43 Primary: monitoring-mysql-1.monitoring-mysql.kuttl-test-perfect-hippo Replicas: [monitoring-mysql-0.monitoring-mysql.kuttl-test-perfect-hippo monitoring-mysql-2.monitoring-mysql.kuttl-test-perfect-hippo] 2025/03/26 02:57:43 lookup monitoring-mysql-0 [10.172.34.17] 2025/03/26 02:57:43 PodIP: 10.172.34.17 2025/03/26 02:57:43 lookup monitoring-mysql-1.monitoring-mysql.kuttl-test-perfect-hippo [10.172.32.27] 2025/03/26 02:57:43 PrimaryIP: 10.172.32.27 2025/03/26 02:57:43 Donor: 
monitoring-mysql-2.monitoring-mysql.kuttl-test-perfect-hippo 2025/03/26 02:57:43 Opening connection to 10.172.34.17 2025/03/26 02:57:43 Clone required: true 2025/03/26 02:57:43 Checking if a clone in progress 2025/03/26 02:57:43 Clone in progress: false 2025/03/26 02:57:43 Cloning from monitoring-mysql-2.monitoring-mysql.kuttl-test-perfect-hippo 2025/03/26 02:57:44 Clone finished. Restarting container... kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:44 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 02:57:47 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 112ms (112ms including waiting) kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 03:01:51 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 03:01:51 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 03:01:51 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 03:01:51 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 03:01:51 +0000 UTC Normal Service monitoring-haproxy DeletingLoadBalancer Deleting load balancer service-controller logger.go:42: 03:01:59 | monitoring | 2025-03-26 03:01:52 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 03:01:52 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 03:01:52 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 03:01:52 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 03:01:52 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 03:01:52 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 03:01:52 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 03:01:52 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 03:01:52 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 03:01:52 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 03:01:59 | monitoring | 2025-03-26 03:01:52 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Killing Stopping 
container mysql-monit kubelet
logger.go:42: 03:01:59 | monitoring | 2025-03-26 03:01:52 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orc} Killing Stopping container orc kubelet
logger.go:42: 03:01:59 | monitoring | 2025-03-26 03:01:52 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 03:01:59 | monitoring | Deleting namespace: kuttl-test-perfect-hippo
=== NAME  kuttl
    harness.go:407: run tests finished
    harness.go:515: cleaning up
    harness.go:572: removing temp folder: ""
--- PASS: kuttl (804.67s)
    --- PASS: kuttl/harness (0.00s)
        --- PASS: kuttl/harness/monitoring (804.24s)
PASS