=== RUN   kuttl
    harness.go:459: starting setup
    harness.go:254: running tests using configured kubeconfig.
    harness.go:277: Successful connection to cluster at: https://136.113.85.188
    harness.go:362: running tests
    harness.go:74: going to run test suite with timeout of 180 seconds for each step
    harness.go:374: testsuite: e2e-tests/tests has 45 tests
=== RUN   kuttl/harness
=== RUN   kuttl/harness/monitoring
=== PAUSE kuttl/harness/monitoring
=== CONT  kuttl/harness/monitoring
    logger.go:42: 13:02:48 | monitoring | Creating namespace: kuttl-test-definite-hare
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | starting test step 0-deploy-operator
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | running command: [sh -c set -o errexit
        set -o xtrace
        source ../../functions
        init_temp_dir # do this only in the first TestStep
        deploy_operator
        deploy_cluster_secrets
        deploy_tls_cluster_secrets
        deploy_client]
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | + source ../../functions
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ realpath ../../..
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | ++++ pwd
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/tests/monitoring
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | ++ test_name=monitoring
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/vars.sh
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/deploy
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/deploy
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/conf
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/conf
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export GIT_BRANCH=PR-1125
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ GIT_BRANCH=PR-1125
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export VERSION=PR-1125-703ecc3e
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ VERSION=PR-1125-703ecc3e
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1125-703ecc3e
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1125-703ecc3e
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ [[ -z 8.4 ]]
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export MYSQL_VERSION=8.4
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ MYSQL_VERSION=8.4
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export PMM_SERVER_VERSION=1.4.3
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ PMM_SERVER_VERSION=1.4.3
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export CERT_MANAGER_VER=1.18.2
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ CERT_MANAGER_VER=1.18.2
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export MINIO_VER=5.4.0
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ MINIO_VER=5.4.0
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export CHAOS_MESH_VER=2.7.2
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ CHAOS_MESH_VER=2.7.2
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ export VAULT_VER=0.16.1
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ VAULT_VER=0.16.1
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | ++++ which gdate
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | ++++ which date
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ date=/usr/sbin/date
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ oc get projects
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ :
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ kubectl get nodes
    logger.go:42: 13:02:48 | monitoring/0-deploy-operator | +++ grep '^minikube'
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | +++ which gsed
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | +++ which sed
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | ++ sed=/usr/sbin/sed
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | ++ oc get projects
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | +++ kubectl version -o json
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | +++ jq -r .serverVersion.gitVersion
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | +++ grep '\-eks\-'
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | grep: warning: stray \ before -
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | ++ '[' ']'
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | ++ EKS=0
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | + init_temp_dir
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | + rm -rf /tmp/kuttl/ps/monitoring
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | + mkdir -p /tmp/kuttl/ps/monitoring
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | + deploy_operator
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | + destroy_operator
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | Error from server (NotFound): deployments.apps "percona-server-mysql-operator" not found
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | + true
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | + [[ -n ps-operator ]]
    logger.go:42: 13:02:49 | monitoring/0-deploy-operator | + kubectl delete namespace ps-operator --force --grace-period=0
    logger.go:42: 13:02:50 | monitoring/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
    logger.go:42: 13:02:50 | monitoring/0-deploy-operator | Error from server (NotFound): namespaces "ps-operator" not found
    logger.go:42: 13:02:50 | monitoring/0-deploy-operator | + true
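The teardown above is deliberately tolerant of missing resources: each bare "+ true" in the xtrace is an "|| true" guard firing after a NotFound error, so a clean cluster does not abort the step. A minimal sketch of that pattern, assuming OPERATOR_NS holds the "ps-operator" namespace seen in this run:

    destroy_operator() {
        # Force-delete the operator deployment; tolerate NotFound on a clean cluster.
        kubectl -n "${OPERATOR_NS}" delete deployment percona-server-mysql-operator \
            --force --grace-period=0 || true
        # Drop the operator namespace the same way when a separate one is configured.
        if [[ -n ${OPERATOR_NS} ]]; then
            kubectl delete namespace "${OPERATOR_NS}" --force --grace-period=0 || true
        fi
    }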
    logger.go:42: 13:02:50 | monitoring/0-deploy-operator | + [[ -n ps-operator ]]
    logger.go:42: 13:02:50 | monitoring/0-deploy-operator | + create_namespace ps-operator
    logger.go:42: 13:02:50 | monitoring/0-deploy-operator | + local namespace=ps-operator
    logger.go:42: 13:02:50 | monitoring/0-deploy-operator | + [[ -n '' ]]
    logger.go:42: 13:02:50 | monitoring/0-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found
    logger.go:42: 13:02:51 | monitoring/0-deploy-operator | + kubectl wait --for=delete namespace ps-operator
    logger.go:42: 13:02:51 | monitoring/0-deploy-operator | + kubectl create namespace ps-operator
    logger.go:42: 13:02:51 | monitoring/0-deploy-operator | namespace/ps-operator created
    logger.go:42: 13:02:51 | monitoring/0-deploy-operator | + apply_crd
    logger.go:42: 13:02:51 | monitoring/0-deploy-operator | + kubectl -n ps-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1125/deploy/crd.yaml
    logger.go:42: 13:02:52 | monitoring/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
    logger.go:42: 13:02:53 | monitoring/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
    logger.go:42: 13:02:54 | monitoring/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
    logger.go:42: 13:02:54 | monitoring/0-deploy-operator | + apply_rbac
    logger.go:42: 13:02:54 | monitoring/0-deploy-operator | + local rbac_file
    logger.go:42: 13:02:54 | monitoring/0-deploy-operator | + '[' -n ps-operator ']'
    logger.go:42: 13:02:54 | monitoring/0-deploy-operator | + rbac_file=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/deploy/cw-rbac.yaml
    logger.go:42: 13:02:54 | monitoring/0-deploy-operator | + kubectl -n ps-operator apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1125/deploy/cw-rbac.yaml
    logger.go:42: 13:02:55 | monitoring/0-deploy-operator | serviceaccount/percona-server-mysql-operator created
    logger.go:42: 13:02:56 | monitoring/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
    logger.go:42: 13:02:56 | monitoring/0-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
    logger.go:42: 13:02:56 | monitoring/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
    logger.go:42: 13:02:57 | monitoring/0-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
    logger.go:42: 13:02:57 | monitoring/0-deploy-operator | + local operator_file
    logger.go:42: 13:02:57 | monitoring/0-deploy-operator | + '[' -n ps-operator ']'
    logger.go:42: 13:02:57 | monitoring/0-deploy-operator | + operator_file=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/deploy/cw-operator.yaml
    logger.go:42: 13:02:57 | monitoring/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"'
    logger.go:42: 13:02:57 | monitoring/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "VERBOSE"'
    logger.go:42: 13:02:57 | monitoring/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-1125-703ecc3e
    logger.go:42: 13:02:57 | monitoring/0-deploy-operator | + kubectl -n ps-operator apply -f -
    logger.go:42: 13:02:57 | monitoring/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-1125-703ecc3e"' /mnt/jenkins/workspace/cloud-ps-operator_PR-1125/deploy/cw-operator.yaml
    logger.go:42: 13:02:58 | monitoring/0-deploy-operator | configmap/percona-server-mysql-operator-config created
    logger.go:42: 13:02:58 | monitoring/0-deploy-operator | deployment.apps/percona-server-mysql-operator created
    logger.go:42: 13:02:58 | monitoring/0-deploy-operator | + deploy_cluster_secrets
    logger.go:42: 13:02:58 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-definite-hare apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/conf/secrets.yaml
    logger.go:42: 13:02:59 | monitoring/0-deploy-operator | secret/test-secrets created
    logger.go:42: 13:02:59 | monitoring/0-deploy-operator | + deploy_tls_cluster_secrets
    logger.go:42: 13:02:59 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-definite-hare apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/conf/ssl-secret.yaml
    logger.go:42: 13:03:00 | monitoring/0-deploy-operator | secret/test-ssl created
    logger.go:42: 13:03:00 | monitoring/0-deploy-operator | + deploy_client
    logger.go:42: 13:03:00 | monitoring/0-deploy-operator | + kubectl -n kuttl-test-definite-hare apply -f -
    logger.go:42: 13:03:00 | monitoring/0-deploy-operator | ++ printf '.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:main-psmysql8.4
    logger.go:42: 13:03:00 | monitoring/0-deploy-operator | + yq eval '.spec.containers[0].image="perconalab/percona-server-mysql-operator:main-psmysql8.4"' /mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/conf/client.yaml
    logger.go:42: 13:03:02 | monitoring/0-deploy-operator | pod/mysql-client created
    logger.go:42: 13:03:02 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 13:03:02 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 13:03:03 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 13:03:04 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 13:03:04 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 13:03:04 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 13:03:06 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 13:03:06 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 13:03:06 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 13:03:07 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 13:03:07 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 13:03:08 | monitoring/0-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 13:03:09 | monitoring/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 13:03:09 | monitoring/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 13:03:10 | monitoring/0-deploy-operator | INFO Found 1 resource(s).
    logger.go:42: 13:03:10 | monitoring/0-deploy-operator | NAME                           NAMESPACE    COL0
    logger.go:42: 13:03:10 | monitoring/0-deploy-operator | percona-server-mysql-operator  ps-operator  1
    logger.go:42: 13:03:10 | monitoring/0-deploy-operator | ASSERT PASS
    logger.go:42: 13:03:10 | monitoring/0-deploy-operator | test step completed 0-deploy-operator
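kuttl re-runs the assert command until it passes or the step's 180-second timeout expires, which is why the same assert appears five times before ASSERT PASS. A standalone equivalent of that polling, as a sketch (assuming the kubectl-assert plugin used above is installed):

    # Poll until the operator deployment reports one ready replica.
    until kubectl assert exist-enhanced deployment percona-server-mysql-operator \
        -n "${OPERATOR_NS:-$NAMESPACE}" --field-selector status.readyReplicas=1; do
        sleep 1   # kuttl's actual retry interval is not visible in the log
    done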
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | starting test step 1-deploy-pmm-server
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | running command: [sh -c set -o errexit
        set -o xtrace
        source ../../functions
        deploy_pmm_server
        sleep 120 # wait for PMM Server to start
        TOKEN=$(get_pmm_server_token)
        kubectl patch -n "${NAMESPACE}" secret test-secrets --type merge --patch "$(jq -n --arg token "$TOKEN" '{"stringData": {"pmmservertoken": $token}}')"]
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | + source ../../functions
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ realpath ../../..
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | ++++ pwd
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/tests/monitoring
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | ++ test_name=monitoring
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/vars.sh
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/deploy
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/deploy
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/conf
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/conf
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export GIT_BRANCH=PR-1125
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ GIT_BRANCH=PR-1125
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export VERSION=PR-1125-703ecc3e
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ VERSION=PR-1125-703ecc3e
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1125-703ecc3e
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1125-703ecc3e
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ [[ -z 8.4 ]]
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export MYSQL_VERSION=8.4
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ MYSQL_VERSION=8.4
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export PMM_SERVER_VERSION=1.4.3
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ PMM_SERVER_VERSION=1.4.3
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export CERT_MANAGER_VER=1.18.2
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ CERT_MANAGER_VER=1.18.2
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export MINIO_VER=5.4.0
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ MINIO_VER=5.4.0
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export CHAOS_MESH_VER=2.7.2
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ CHAOS_MESH_VER=2.7.2
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ export VAULT_VER=0.16.1
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ VAULT_VER=0.16.1
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | ++++ which gdate
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | ++++ which date
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ date=/usr/sbin/date
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ oc get projects
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ :
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ kubectl get nodes
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ grep '^minikube'
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ which gsed
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ which sed
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | ++ sed=/usr/sbin/sed
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | ++ oc get projects
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ kubectl version -o json
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ jq -r .serverVersion.gitVersion
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | +++ grep '\-eks\-'
    logger.go:42: 13:03:10 | monitoring/1-deploy-pmm-server | grep: warning: stray \ before -
    logger.go:42: 13:03:11 | monitoring/1-deploy-pmm-server | Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1
    logger.go:42: 13:03:11 | monitoring/1-deploy-pmm-server | ++ '[' ']'
    logger.go:42: 13:03:11 | monitoring/1-deploy-pmm-server | ++ EKS=0
    logger.go:42: 13:03:11 | monitoring/1-deploy-pmm-server | + deploy_pmm_server
    logger.go:42: 13:03:11 | monitoring/1-deploy-pmm-server | + helm uninstall -n kuttl-test-definite-hare monitoring
    logger.go:42: 13:03:11 | monitoring/1-deploy-pmm-server | Error: uninstall: Release not loaded: monitoring: release: not found
    logger.go:42: 13:03:11 | monitoring/1-deploy-pmm-server | + :
    logger.go:42: 13:03:11 | monitoring/1-deploy-pmm-server | + helm repo remove percona
    logger.go:42: 13:03:11 | monitoring/1-deploy-pmm-server | Error: no repo named "percona" found
    logger.go:42: 13:03:11 | monitoring/1-deploy-pmm-server | + :
    logger.go:42: 13:03:11 | monitoring/1-deploy-pmm-server | + kubectl delete clusterrole monitoring --ignore-not-found
    logger.go:42: 13:03:12 | monitoring/1-deploy-pmm-server | + kubectl delete clusterrolebinding monitoring --ignore-not-found
    logger.go:42: 13:03:13 | monitoring/1-deploy-pmm-server | + helm repo add percona https://percona.github.io/percona-helm-charts/
    logger.go:42: 13:03:13 | monitoring/1-deploy-pmm-server | "percona" has been added to your repositories
    logger.go:42: 13:03:13 | monitoring/1-deploy-pmm-server | + helm repo update
    logger.go:42: 13:03:13 | monitoring/1-deploy-pmm-server | Hang tight while we grab the latest from your chart repositories...
    logger.go:42: 13:03:13 | monitoring/1-deploy-pmm-server | ...Successfully got an update from the "minio" chart repository
    logger.go:42: 13:03:13 | monitoring/1-deploy-pmm-server | ...Successfully got an update from the "chaos-mesh" chart repository
    logger.go:42: 13:03:13 | monitoring/1-deploy-pmm-server | ...Successfully got an update from the "hashicorp" chart repository
    logger.go:42: 13:03:13 | monitoring/1-deploy-pmm-server | ...Successfully got an update from the "percona" chart repository
    logger.go:42: 13:03:13 | monitoring/1-deploy-pmm-server | Update Complete. ⎈Happy Helming!⎈
    logger.go:42: 13:03:13 | monitoring/1-deploy-pmm-server | + [[ -n '' ]]
    logger.go:42: 13:03:13 | monitoring/1-deploy-pmm-server | + retry 10 120 helm install monitoring percona/pmm -n kuttl-test-definite-hare --set fullnameOverride=monitoring --version 1.4.3 --set image.tag=3-dev-latest --set image.repository=perconalab/pmm-server --set service.type=LoadBalancer --force
    logger.go:42: 13:03:13 | monitoring/1-deploy-pmm-server | + local max=10
    logger.go:42: 13:03:13 | monitoring/1-deploy-pmm-server | + local delay=120
    logger.go:42: 13:03:13 | monitoring/1-deploy-pmm-server | + shift 2
    logger.go:42: 13:03:13 | monitoring/1-deploy-pmm-server | + local n=1
    logger.go:42: 13:03:13 | monitoring/1-deploy-pmm-server | + helm install monitoring percona/pmm -n kuttl-test-definite-hare --set fullnameOverride=monitoring --version 1.4.3 --set image.tag=3-dev-latest --set image.repository=perconalab/pmm-server --set service.type=LoadBalancer --force
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server | NAME: monitoring
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server | LAST DEPLOYED: Tue Oct 28 13:03:14 2025
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server | NAMESPACE: kuttl-test-definite-hare
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server | STATUS: deployed
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server | REVISION: 1
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server | TEST SUITE: None
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server | NOTES:
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server | Percona Monitoring and Management (PMM)
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server |
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server | An open source database monitoring, observability and management tool
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server | Check more info here: https://docs.percona.com/percona-monitoring-and-management/index.html
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server |
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server | Get the application URL:
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server | NOTE: It may take a few minutes for the LoadBalancer IP to be available.
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server | You can watch the status of by running 'kubectl get --namespace kuttl-test-definite-hare svc -w monitoring-service'
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server | export SERVICE_IP=$(kubectl get svc --namespace kuttl-test-definite-hare monitoring-service -o jsonpath="{.status.loadBalancer.ingress[0].ip}")
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server | echo https://$SERVICE_IP:
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server |
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server | Get password for the "admin" user:
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server | export ADMIN_PASS=$(kubectl get secret pmm-secret --namespace kuttl-test-definite-hare -o jsonpath='{.data.PMM_ADMIN_PASSWORD}' | base64 --decode)
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server | echo $ADMIN_PASS
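The helm install goes through a retry wrapper, of which only the first statements are visible in the xtrace (max=10, delay=120, shift 2, n=1). A plausible reconstruction of the rest of the helper, hedged accordingly:

    retry() {
        local max=$1
        local delay=$2
        shift 2                # the remaining arguments are the command to run
        local n=1
        until "$@"; do         # the loop body is assumed; only the locals above appear in the trace
            if [[ $n -ge $max ]]; then
                echo "command '$*' failed after $n attempts" >&2
                return 1
            fi
            sleep "$delay"
            n=$((n + 1))
        done
    }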
    logger.go:42: 13:03:17 | monitoring/1-deploy-pmm-server | + sleep 120
    logger.go:42: 13:05:17 | monitoring/1-deploy-pmm-server | ++ get_pmm_server_token
    logger.go:42: 13:05:17 | monitoring/1-deploy-pmm-server | ++ local key_name=
    logger.go:42: 13:05:17 | monitoring/1-deploy-pmm-server | ++ [[ -z '' ]]
    logger.go:42: 13:05:17 | monitoring/1-deploy-pmm-server | ++ key_name=operator
    logger.go:42: 13:05:17 | monitoring/1-deploy-pmm-server | ++ local ADMIN_PASSWORD
    logger.go:42: 13:05:17 | monitoring/1-deploy-pmm-server | +++ kubectl -n kuttl-test-definite-hare get secret pmm-secret -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}'
    logger.go:42: 13:05:17 | monitoring/1-deploy-pmm-server | +++ base64 --decode
    logger.go:42: 13:05:17 | monitoring/1-deploy-pmm-server | ++ ADMIN_PASSWORD='^gl4}Q%QsB/#XZy>'
    logger.go:42: 13:05:17 | monitoring/1-deploy-pmm-server | ++ [[ -z ^gl4}Q%QsB/#XZy> ]]
    logger.go:42: 13:05:17 | monitoring/1-deploy-pmm-server | ++ local create_response create_status_code create_json_response
    logger.go:42: 13:05:17 | monitoring/1-deploy-pmm-server | ++++ get_service_ip monitoring-service
    logger.go:42: 13:05:17 | monitoring/1-deploy-pmm-server | ++++ local service=monitoring-service
    logger.go:42: 13:05:17 | monitoring/1-deploy-pmm-server | ++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.spec.type}'
    logger.go:42: 13:05:17 | monitoring/1-deploy-pmm-server | ++++ grep -q NotFound
    logger.go:42: 13:05:18 | monitoring/1-deploy-pmm-server | +++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.spec.type}'
    logger.go:42: 13:05:18 | monitoring/1-deploy-pmm-server | ++++ '[' LoadBalancer = ClusterIP ']'
    logger.go:42: 13:05:18 | monitoring/1-deploy-pmm-server | ++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[]}'
    logger.go:42: 13:05:18 | monitoring/1-deploy-pmm-server | ++++ egrep -q 'hostname|ip'
    logger.go:42: 13:05:18 | monitoring/1-deploy-pmm-server | egrep: warning: egrep is obsolescent; using grep -E
    logger.go:42: 13:05:18 | monitoring/1-deploy-pmm-server | ++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
    logger.go:42: 13:05:19 | monitoring/1-deploy-pmm-server | ++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
    logger.go:42: 13:05:19 | monitoring/1-deploy-pmm-server | +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -d '{"name":"operator", "role":"Admin", "isDisabled":false}' --user 'admin:^gl4}Q%QsB/#XZy>' https://34.58.226.20/graph/api/serviceaccounts -w '\n%{http_code}'
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | ++ create_response='{"id":2,"uid":"cf2eyvzg70ef4a","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | 201'
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | +++ echo '{"id":2,"uid":"cf2eyvzg70ef4a","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | 201'
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | +++ tail -n1
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | ++ create_status_code=201
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | +++ echo '{"id":2,"uid":"cf2eyvzg70ef4a","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | 201'
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | +++ /usr/sbin/sed '$ d'
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | ++ create_json_response='{"id":2,"uid":"cf2eyvzg70ef4a","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}'
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | ++ [[ 201 -ne 201 ]]
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | ++ local service_account_id
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | +++ echo '{"id":2,"uid":"cf2eyvzg70ef4a","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}'
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | +++ jq -r .id
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | ++ service_account_id=2
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | ++ [[ -z 2 ]]
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | ++ [[ 2 == \n\u\l\l ]]
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | ++ local token_response token_status_code token_json_response
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | ++++ get_service_ip monitoring-service
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | ++++ local service=monitoring-service
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | ++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.spec.type}'
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | ++++ grep -q NotFound
    logger.go:42: 13:05:20 | monitoring/1-deploy-pmm-server | +++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.spec.type}'
    logger.go:42: 13:05:21 | monitoring/1-deploy-pmm-server | ++++ '[' LoadBalancer = ClusterIP ']'
    logger.go:42: 13:05:21 | monitoring/1-deploy-pmm-server | ++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[]}'
    logger.go:42: 13:05:21 | monitoring/1-deploy-pmm-server | ++++ egrep -q 'hostname|ip'
    logger.go:42: 13:05:21 | monitoring/1-deploy-pmm-server | egrep: warning: egrep is obsolescent; using grep -E
    logger.go:42: 13:05:21 | monitoring/1-deploy-pmm-server | ++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[].ip}'
    logger.go:42: 13:05:21 | monitoring/1-deploy-pmm-server | ++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[].hostname}'
    logger.go:42: 13:05:22 | monitoring/1-deploy-pmm-server | +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -d '{"name":"operator"}' --user 'admin:^gl4}Q%QsB/#XZy>' https://34.58.226.20/graph/api/serviceaccounts/2/tokens -w '\n%{http_code}'
    logger.go:42: 13:05:22 | monitoring/1-deploy-pmm-server | ++ token_response='{"id":1,"name":"operator","key":"glsa_dKZJNjnSCi3iQTc1YNG3dGggU60RMYwS_95ad7c5b"}
    logger.go:42: 13:05:22 | monitoring/1-deploy-pmm-server | 200'
    logger.go:42: 13:05:22 | monitoring/1-deploy-pmm-server | +++ echo '{"id":1,"name":"operator","key":"glsa_dKZJNjnSCi3iQTc1YNG3dGggU60RMYwS_95ad7c5b"}
    logger.go:42: 13:05:22 | monitoring/1-deploy-pmm-server | 200'
    logger.go:42: 13:05:22 | monitoring/1-deploy-pmm-server | +++ tail -n1
    logger.go:42: 13:05:22 | monitoring/1-deploy-pmm-server | ++ token_status_code=200
    logger.go:42: 13:05:22 | monitoring/1-deploy-pmm-server | +++ echo '{"id":1,"name":"operator","key":"glsa_dKZJNjnSCi3iQTc1YNG3dGggU60RMYwS_95ad7c5b"}
    logger.go:42: 13:05:22 | monitoring/1-deploy-pmm-server | 200'
    logger.go:42: 13:05:22 | monitoring/1-deploy-pmm-server | +++ /usr/sbin/sed '$ d'
    logger.go:42: 13:05:22 | monitoring/1-deploy-pmm-server | ++ token_json_response='{"id":1,"name":"operator","key":"glsa_dKZJNjnSCi3iQTc1YNG3dGggU60RMYwS_95ad7c5b"}'
    logger.go:42: 13:05:22 | monitoring/1-deploy-pmm-server | ++ [[ 200 -ne 200 ]]
    logger.go:42: 13:05:22 | monitoring/1-deploy-pmm-server | ++ echo '{"id":1,"name":"operator","key":"glsa_dKZJNjnSCi3iQTc1YNG3dGggU60RMYwS_95ad7c5b"}'
    logger.go:42: 13:05:22 | monitoring/1-deploy-pmm-server | ++ jq -r .key
    logger.go:42: 13:05:22 | monitoring/1-deploy-pmm-server | + TOKEN=glsa_dKZJNjnSCi3iQTc1YNG3dGggU60RMYwS_95ad7c5b
    logger.go:42: 13:05:22 | monitoring/1-deploy-pmm-server | ++ jq -n --arg token glsa_dKZJNjnSCi3iQTc1YNG3dGggU60RMYwS_95ad7c5b '{"stringData": {"pmmservertoken": $token}}'
    logger.go:42: 13:05:22 | monitoring/1-deploy-pmm-server | + kubectl patch -n kuttl-test-definite-hare secret test-secrets --type merge --patch '{
    logger.go:42: 13:05:22 | monitoring/1-deploy-pmm-server | "stringData": {
    logger.go:42: 13:05:22 | monitoring/1-deploy-pmm-server | "pmmservertoken": "glsa_dKZJNjnSCi3iQTc1YNG3dGggU60RMYwS_95ad7c5b"
    logger.go:42: 13:05:22 | monitoring/1-deploy-pmm-server | }
    logger.go:42: 13:05:22 | monitoring/1-deploy-pmm-server | }'
    logger.go:42: 13:05:23 | monitoring/1-deploy-pmm-server | secret/test-secrets patched
    logger.go:42: 13:05:24 | monitoring/1-deploy-pmm-server | test step completed 1-deploy-pmm-server
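get_pmm_server_token, traced above, talks to the Grafana API inside PMM Server: it creates an Admin service account (POST /graph/api/serviceaccounts, expecting 201), then mints a token for it (POST /graph/api/serviceaccounts/<id>/tokens, expecting 200). A condensed sketch of that flow; the real helper also validates the HTTP status codes that the trace captures:

    get_pmm_server_token() {
        local key_name=${1:-operator}
        local admin_pass ip sa_id token
        admin_pass=$(kubectl -n "${NAMESPACE}" get secret pmm-secret \
            -o jsonpath='{.data.PMM_ADMIN_PASSWORD}' | base64 --decode)
        ip=$(get_service_ip monitoring-service)   # resolves the LoadBalancer ingress IP or hostname
        # Create an Admin service account for the operator.
        sa_id=$(curl --insecure -s -X POST -H 'Content-Type: application/json' \
            -d "{\"name\":\"${key_name}\", \"role\":\"Admin\", \"isDisabled\":false}" \
            --user "admin:${admin_pass}" "https://${ip}/graph/api/serviceaccounts" | jq -r .id)
        # Mint a token for that service account and print it.
        curl --insecure -s -X POST -H 'Content-Type: application/json' \
            -d "{\"name\":\"${key_name}\"}" --user "admin:${admin_pass}" \
            "https://${ip}/graph/api/serviceaccounts/${sa_id}/tokens" | jq -r .key
    }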
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | starting test step 2-create-cluster
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | running command: [sh -c set -o errexit
        set -o xtrace
        source ../../functions
        get_cr \
          | yq eval '.spec.secretsName="test-secrets"' - \
          | yq eval '.spec.mysql.clusterType="async"' - \
          | yq eval '.spec.pmm.enabled = true' - \
          | yq eval '.spec.pmm.mysqlParams = "--disable-tablestats-limit=2000"' - \
          | yq eval '.spec.pmm.readinessProbes.initialDelaySeconds = 15' - \
          | yq eval '.spec.pmm.readinessProbes.timeoutSeconds = 15' - \
          | yq eval '.spec.pmm.readinessProbes.periodSeconds = 30' - \
          | yq eval '.spec.pmm.readinessProbes.successThreshold = 1' - \
          | yq eval '.spec.pmm.readinessProbes.failureThreshold = 5' - \
          | yq eval '.spec.pmm.livenessProbes.initialDelaySeconds = 15' - \
          | yq eval '.spec.pmm.livenessProbes.timeoutSeconds = 15' - \
          | yq eval '.spec.pmm.livenessProbes.periodSeconds = 30' - \
          | yq eval '.spec.pmm.livenessProbes.successThreshold = 1' - \
          | yq eval '.spec.pmm.livenessProbes.failureThreshold = 5' - \
          | yq eval '.spec.proxy.haproxy.enabled = true' - \
          | yq eval '.spec.proxy.haproxy.expose.type = "LoadBalancer"' - \
          | kubectl -n "${NAMESPACE}" apply -f -]
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | + source ../../functions
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ realpath ../../..
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | ++++ pwd
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/tests/monitoring
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | ++ test_name=monitoring
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/vars.sh
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/deploy
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/deploy
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/conf
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/conf
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export GIT_BRANCH=PR-1125
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ GIT_BRANCH=PR-1125
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export VERSION=PR-1125-703ecc3e
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ VERSION=PR-1125-703ecc3e
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1125-703ecc3e
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1125-703ecc3e
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ [[ -z 8.4 ]]
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export MYSQL_VERSION=8.4
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ MYSQL_VERSION=8.4
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export PMM_SERVER_VERSION=1.4.3
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ PMM_SERVER_VERSION=1.4.3
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export CERT_MANAGER_VER=1.18.2
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ CERT_MANAGER_VER=1.18.2
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export MINIO_VER=5.4.0
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ MINIO_VER=5.4.0
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export CHAOS_MESH_VER=2.7.2
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ CHAOS_MESH_VER=2.7.2
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ export VAULT_VER=0.16.1
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ VAULT_VER=0.16.1
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | ++++ which gdate
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | ++++ which date
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ date=/usr/sbin/date
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ oc get projects
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ :
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ kubectl get nodes
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ grep '^minikube'
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ which gsed
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ which sed
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | ++ sed=/usr/sbin/sed
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | ++ oc get projects
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ kubectl version -o json
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ jq -r .serverVersion.gitVersion
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | +++ grep '\-eks\-'
    logger.go:42: 13:05:24 | monitoring/2-create-cluster | grep: warning: stray \ before -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | ++ '[' ']'
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | ++ EKS=0
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + get_cr
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + local name_suffix=
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + local image_mysql=perconalab/percona-server-mysql-operator:main-psmysql8.4
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.secretsName="test-secrets"' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + local image_backup=perconalab/percona-server-mysql-operator:main-backup8.4
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + local image_orchestrator=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + local image_router=perconalab/percona-server-mysql-operator:main-router8.4
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + local image_toolkit=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + local image_haproxy=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + local image_pmm_client=perconalab/pmm-client:3-dev-latest
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + local cr_file=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/deploy/cr.yaml
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.pmm.enabled = true' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.pmm.mysqlParams = "--disable-tablestats-limit=2000"' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.pmm.readinessProbes.initialDelaySeconds = 15' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | ++ printf '.metadata.name="%s"' monitoring
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.pmm.readinessProbes.timeoutSeconds = 15' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.metadata.name="monitoring"' /mnt/jenkins/workspace/cloud-ps-operator_PR-1125/deploy/cr.yaml
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval 'del(.spec.secretsName)' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | ++ printf '.spec.initContainer.image="%s"' perconalab/percona-server-mysql-operator:PR-1125-703ecc3e
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval .spec.mysql.gracePeriod=30 -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.pmm.readinessProbes.successThreshold = 1' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.pmm.readinessProbes.periodSeconds = 30' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.initContainer.image="perconalab/percona-server-mysql-operator:PR-1125-703ecc3e"' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.pmm.readinessProbes.failureThreshold = 5' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval .spec.orchestrator.enabled=true -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup8.4
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.pmm.livenessProbes.periodSeconds = 30' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.pmm.livenessProbes.timeoutSeconds = 15' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql8.4
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.pmm.livenessProbes.initialDelaySeconds = 15' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.pmm.livenessProbes.successThreshold = 1' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + '[' -n '' ']'
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router8.4
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:3-dev-latest
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.pmm.livenessProbes.failureThreshold = 5' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql8.4"' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router8.4"' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.proxy.haproxy.expose.type = "LoadBalancer"' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.proxy.haproxy.enabled = true' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup8.4"' -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + kubectl -n kuttl-test-definite-hare apply -f -
    logger.go:42: 13:05:25 | monitoring/2-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:3-dev-latest"' -
    logger.go:42: 13:05:26 | monitoring/2-create-cluster | perconaservermysql.ps.percona.com/monitoring created
    logger.go:42: 13:12:18 | monitoring/2-create-cluster | test step completed 2-create-cluster
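The interleaved "+ yq eval ..." lines above are the step-2 pipeline executing: every yq stage is its own process, so xtrace prints them as they start rather than in pipeline order. Stripped of the probe tuning, the shape of the command is:

    # Render the CR, override the fields this test cares about, and apply it.
    get_cr \
        | yq eval '.spec.secretsName="test-secrets"' - \
        | yq eval '.spec.mysql.clusterType="async"' - \
        | yq eval '.spec.pmm.enabled = true' - \
        | yq eval '.spec.proxy.haproxy.enabled = true' - \
        | yq eval '.spec.proxy.haproxy.expose.type = "LoadBalancer"' - \
        | kubectl -n "${NAMESPACE}" apply -f -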
monitoring/3-rotate-pmm-token | +++ export VERSION=PR-1125-703ecc3e logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ VERSION=PR-1125-703ecc3e logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1125-703ecc3e logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1125-703ecc3e logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ [[ -z 8.4 ]] logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ export MYSQL_VERSION=8.4 logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ MYSQL_VERSION=8.4 logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ export CERT_MANAGER_VER=1.18.2 logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ CERT_MANAGER_VER=1.18.2 logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ export MINIO_VER=5.4.0 logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ MINIO_VER=5.4.0 logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ export VAULT_VER=0.16.1 logger.go:42: 13:12:18 | 
monitoring/3-rotate-pmm-token | +++ VAULT_VER=0.16.1 logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | ++++ which gdate logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | ++++ which date logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ date=/usr/sbin/date logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ oc get projects logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ : logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ kubectl get nodes logger.go:42: 13:12:18 | monitoring/3-rotate-pmm-token | +++ grep '^minikube' logger.go:42: 13:12:19 | monitoring/3-rotate-pmm-token | +++ which gsed logger.go:42: 13:12:19 | monitoring/3-rotate-pmm-token | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 13:12:19 | monitoring/3-rotate-pmm-token | +++ which sed logger.go:42: 13:12:19 | monitoring/3-rotate-pmm-token | ++ sed=/usr/sbin/sed logger.go:42: 13:12:19 | monitoring/3-rotate-pmm-token | ++ oc get projects logger.go:42: 13:12:19 | monitoring/3-rotate-pmm-token | +++ kubectl version -o json logger.go:42: 13:12:19 | monitoring/3-rotate-pmm-token | +++ jq -r .serverVersion.gitVersion logger.go:42: 13:12:19 | monitoring/3-rotate-pmm-token | +++ grep '\-eks\-' logger.go:42: 13:12:19 | monitoring/3-rotate-pmm-token | grep: warning: stray \ before - logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | ++ '[' ']' logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | ++ EKS=0 logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | + wait_for_generation sts/monitoring-mysql 1 logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | + local resource=sts/monitoring-mysql logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | + local target_generation=1 logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | + echo 'Waiting for sts/monitoring-mysql to reach generation 1...' logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | Waiting for sts/monitoring-mysql to reach generation 1... logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | + true logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | ++ kubectl -n kuttl-test-definite-hare get sts/monitoring-mysql -o 'jsonpath={.metadata.generation}' logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | + current_generation=1 logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | + '[' 1 -eq 1 ']' logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | + echo 'Resource sts/monitoring-mysql has reached generation 1.' logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | Resource sts/monitoring-mysql has reached generation 1. 
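
The helper traced above polls the StatefulSet's .metadata.generation until it reports the target value; the generation increments only when the object's spec changes, so generation 1 confirms nothing has been rotated yet. A minimal sketch reconstructed from this xtrace (the retry sleep is an assumption; the generation matched on the first probe here, so the retry path never shows up in the log):

wait_for_generation() {
    local resource=$1
    local target_generation=$2

    echo "Waiting for ${resource} to reach generation ${target_generation}..."
    while true; do
        # .metadata.generation increments every time the object's spec changes
        local current_generation
        current_generation=$(kubectl -n "${NAMESPACE}" get "${resource}" -o 'jsonpath={.metadata.generation}')
        if [ "${current_generation}" -eq "${target_generation}" ]; then
            echo "Resource ${resource} has reached generation ${target_generation}."
            break
        fi
        sleep 5 # assumed retry interval, not observable in this trace
    done
}

The same helper is called again with target generation 2 after the token rotation below, which is how the test proves the operator actually rolled both StatefulSets.
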
logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | + break logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | + wait_for_generation sts/monitoring-haproxy 1 logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | + local resource=sts/monitoring-haproxy logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | + local target_generation=1 logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | + echo 'Waiting for sts/monitoring-haproxy to reach generation 1...' logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | Waiting for sts/monitoring-haproxy to reach generation 1... logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | + true logger.go:42: 13:12:20 | monitoring/3-rotate-pmm-token | ++ kubectl -n kuttl-test-definite-hare get sts/monitoring-haproxy -o 'jsonpath={.metadata.generation}' logger.go:42: 13:12:21 | monitoring/3-rotate-pmm-token | + current_generation=1 logger.go:42: 13:12:21 | monitoring/3-rotate-pmm-token | + '[' 1 -eq 1 ']' logger.go:42: 13:12:21 | monitoring/3-rotate-pmm-token | + echo 'Resource sts/monitoring-haproxy has reached generation 1.' logger.go:42: 13:12:21 | monitoring/3-rotate-pmm-token | Resource sts/monitoring-haproxy has reached generation 1. logger.go:42: 13:12:21 | monitoring/3-rotate-pmm-token | + break logger.go:42: 13:12:21 | monitoring/3-rotate-pmm-token | ++ get_pmm_server_token operator-new logger.go:42: 13:12:21 | monitoring/3-rotate-pmm-token | ++ local key_name=operator-new logger.go:42: 13:12:21 | monitoring/3-rotate-pmm-token | ++ [[ -z operator-new ]] logger.go:42: 13:12:21 | monitoring/3-rotate-pmm-token | ++ local ADMIN_PASSWORD logger.go:42: 13:12:21 | monitoring/3-rotate-pmm-token | +++ kubectl -n kuttl-test-definite-hare get secret pmm-secret -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' logger.go:42: 13:12:21 | monitoring/3-rotate-pmm-token | +++ base64 --decode logger.go:42: 13:12:21 | monitoring/3-rotate-pmm-token | ++ ADMIN_PASSWORD='^gl4}Q%QsB/#XZy>' logger.go:42: 13:12:21 | monitoring/3-rotate-pmm-token | ++ [[ -z ^gl4}Q%QsB/#XZy> ]] logger.go:42: 13:12:21 | monitoring/3-rotate-pmm-token | ++ local create_response create_status_code create_json_response logger.go:42: 13:12:21 | monitoring/3-rotate-pmm-token | ++++ get_service_ip monitoring-service logger.go:42: 13:12:21 | monitoring/3-rotate-pmm-token | ++++ local service=monitoring-service logger.go:42: 13:12:21 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.spec.type}' logger.go:42: 13:12:21 | monitoring/3-rotate-pmm-token | ++++ grep -q NotFound logger.go:42: 13:12:22 | monitoring/3-rotate-pmm-token | +++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.spec.type}' logger.go:42: 13:12:22 | monitoring/3-rotate-pmm-token | ++++ '[' LoadBalancer = ClusterIP ']' logger.go:42: 13:12:22 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[]}' logger.go:42: 13:12:22 | monitoring/3-rotate-pmm-token | ++++ egrep -q 'hostname|ip' logger.go:42: 13:12:22 | monitoring/3-rotate-pmm-token | egrep: warning: egrep is obsolescent; using grep -E logger.go:42: 13:12:23 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[].ip}' logger.go:42: 13:12:23 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 
'jsonpath={.status.loadBalancer.ingress[].hostname}' logger.go:42: 13:12:23 | monitoring/3-rotate-pmm-token | +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -d '{"name":"operator-new", "role":"Admin", "isDisabled":false}' --user 'admin:^gl4}Q%QsB/#XZy>' https://34.58.226.20/graph/api/serviceaccounts -w '\n%{http_code}' logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | ++ create_response='{"id":3,"uid":"ef2eziot3emf4f","name":"operator-new","login":"sa-1-operator-new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | 201' logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | +++ echo '{"id":3,"uid":"ef2eziot3emf4f","name":"operator-new","login":"sa-1-operator-new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | 201' logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | +++ tail -n1 logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | ++ create_status_code=201 logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | +++ echo '{"id":3,"uid":"ef2eziot3emf4f","name":"operator-new","login":"sa-1-operator-new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""} logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | 201' logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | +++ /usr/sbin/sed '$ d' logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | ++ create_json_response='{"id":3,"uid":"ef2eziot3emf4f","name":"operator-new","login":"sa-1-operator-new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | ++ [[ 201 -ne 201 ]] logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | ++ local service_account_id logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | +++ echo '{"id":3,"uid":"ef2eziot3emf4f","name":"operator-new","login":"sa-1-operator-new","orgId":1,"isDisabled":false,"role":"Admin","tokens":0,"avatarUrl":""}' logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | +++ jq -r .id logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | ++ service_account_id=3 logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | ++ [[ -z 3 ]] logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | ++ [[ 3 == \n\u\l\l ]] logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | ++ local token_response token_status_code token_json_response logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | ++++ get_service_ip monitoring-service logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | ++++ local service=monitoring-service logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.spec.type}' logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | ++++ grep -q NotFound logger.go:42: 13:12:24 | monitoring/3-rotate-pmm-token | +++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.spec.type}' logger.go:42: 13:12:25 | monitoring/3-rotate-pmm-token | ++++ '[' LoadBalancer = ClusterIP ']' logger.go:42: 13:12:25 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[]}' logger.go:42: 13:12:25 | monitoring/3-rotate-pmm-token | ++++ egrep -q 'hostname|ip' logger.go:42: 13:12:25 | monitoring/3-rotate-pmm-token | egrep: warning: egrep is obsolescent; using grep -E 
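
get_pmm_server_token, whose xtrace starts above, and its counterpart delete_pmm_server_token, traced further below, drive Grafana's service-account API inside PMM: create a service account, mint a token for it, and later look both up by name to delete the old token. A condensed sketch assembled from the curl calls visible in this log; the real helpers also capture each HTTP status via -w '\n%{http_code}' and bail on unexpected codes, which is trimmed here, and get_service_ip is the suite's own helper:

get_pmm_server_token() {
    local key_name=$1
    local admin_password ip
    admin_password=$(kubectl -n "${NAMESPACE}" get secret pmm-secret \
        -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' | base64 --decode)
    ip=$(get_service_ip monitoring-service)

    # 1. create an Admin service account (the trace expects HTTP 201)
    local sa_id
    sa_id=$(curl --insecure -s -X POST \
        -H 'Content-Type: application/json' -H 'Accept: application/json' \
        -d "{\"name\":\"${key_name}\", \"role\":\"Admin\", \"isDisabled\":false}" \
        --user "admin:${admin_password}" \
        "https://${ip}/graph/api/serviceaccounts" | jq -r .id)

    # 2. mint a token for it (HTTP 200) and print the key, e.g. glsa_...
    curl --insecure -s -X POST -H 'Content-Type: application/json' \
        -d "{\"name\":\"${key_name}\"}" \
        --user "admin:${admin_password}" \
        "https://${ip}/graph/api/serviceaccounts/${sa_id}/tokens" | jq -r .key
}

delete_pmm_server_token() {
    local key_name=$1
    local admin_password ip
    admin_password=$(kubectl -n "${NAMESPACE}" get secret pmm-secret \
        -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' | base64 --decode)
    ip=$(get_service_ip monitoring-service)

    # resolve the service account by name, then its token by name, then delete
    local sa_id token_id
    sa_id=$(curl --insecure -s --user "admin:${admin_password}" \
        "https://${ip}/graph/api/serviceaccounts/search" \
        | jq -r ".serviceAccounts[] | select(.name == \"${key_name}\").id")
    token_id=$(curl --insecure -s --user "admin:${admin_password}" \
        "https://${ip}/graph/api/serviceaccounts/${sa_id}/tokens" \
        | jq -r ".[] | select(.name == \"${key_name}\").id")
    curl --insecure -s -X DELETE --user "admin:${admin_password}" \
        "https://${ip}/graph/api/serviceaccounts/${sa_id}/tokens/${token_id}"
}

The test then patches the new key into the test-secrets Secret (stringData.pmmservertoken) before deleting the old one, so the operator always has a valid token to pick up.
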
logger.go:42: 13:12:25 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[].ip}' logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | +++ curl --insecure -s -X POST -H 'Content-Type: application/json' -d '{"name":"operator-new"}' --user 'admin:^gl4}Q%QsB/#XZy>' https://34.58.226.20/graph/api/serviceaccounts/3/tokens -w '\n%{http_code}' logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | ++ token_response='{"id":2,"name":"operator-new","key":"glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496"} logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | +++ echo '{"id":2,"name":"operator-new","key":"glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496"} logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | +++ tail -n1 logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | ++ token_status_code=200 logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | +++ echo '{"id":2,"name":"operator-new","key":"glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496"} logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | +++ /usr/sbin/sed '$ d' logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | ++ token_json_response='{"id":2,"name":"operator-new","key":"glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496"}' logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | ++ [[ 200 -ne 200 ]] logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | ++ echo '{"id":2,"name":"operator-new","key":"glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496"}' logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | ++ jq -r .key logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | + NEW_TOKEN=glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496 logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | ++ jq -n --arg token glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496 '{"stringData": {"pmmservertoken": $token}}' logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | + kubectl patch -n kuttl-test-definite-hare secret test-secrets --type merge --patch '{ logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | "stringData": { logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | "pmmservertoken": "glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496" logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | } logger.go:42: 13:12:26 | monitoring/3-rotate-pmm-token | }' logger.go:42: 13:12:27 | monitoring/3-rotate-pmm-token | secret/test-secrets patched logger.go:42: 13:12:27 | monitoring/3-rotate-pmm-token | + delete_pmm_server_token operator logger.go:42: 13:12:27 | monitoring/3-rotate-pmm-token | + local key_name=operator logger.go:42: 13:12:27 | monitoring/3-rotate-pmm-token | + [[ -z operator ]] logger.go:42: 13:12:27 | monitoring/3-rotate-pmm-token | + local ADMIN_PASSWORD logger.go:42: 13:12:27 | monitoring/3-rotate-pmm-token | ++ kubectl -n kuttl-test-definite-hare get secret pmm-secret -o 'jsonpath={.data.PMM_ADMIN_PASSWORD}' logger.go:42: 13:12:27 | monitoring/3-rotate-pmm-token | ++ base64 --decode logger.go:42: 13:12:27 | monitoring/3-rotate-pmm-token | + ADMIN_PASSWORD='^gl4}Q%QsB/#XZy>' logger.go:42: 13:12:27 | monitoring/3-rotate-pmm-token | + [[ -z 
^gl4}Q%QsB/#XZy> ]] logger.go:42: 13:12:27 | monitoring/3-rotate-pmm-token | + local 'user_credentials=admin:^gl4}Q%QsB/#XZy>' logger.go:42: 13:12:27 | monitoring/3-rotate-pmm-token | + local service_accounts_response service_accounts_status logger.go:42: 13:12:27 | monitoring/3-rotate-pmm-token | +++ get_service_ip monitoring-service logger.go:42: 13:12:27 | monitoring/3-rotate-pmm-token | +++ local service=monitoring-service logger.go:42: 13:12:27 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.spec.type}' logger.go:42: 13:12:27 | monitoring/3-rotate-pmm-token | +++ grep -q NotFound logger.go:42: 13:12:28 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.spec.type}' logger.go:42: 13:12:28 | monitoring/3-rotate-pmm-token | +++ '[' LoadBalancer = ClusterIP ']' logger.go:42: 13:12:28 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[]}' logger.go:42: 13:12:28 | monitoring/3-rotate-pmm-token | +++ egrep -q 'hostname|ip' logger.go:42: 13:12:28 | monitoring/3-rotate-pmm-token | egrep: warning: egrep is obsolescent; using grep -E logger.go:42: 13:12:29 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[].ip}' logger.go:42: 13:12:29 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | ++ curl --insecure -s -X GET --user 'admin:^gl4}Q%QsB/#XZy>' https://34.58.226.20/graph/api/serviceaccounts/search -w '\n%{http_code}' logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | + service_accounts_response='{"totalCount":2,"serviceAccounts":[{"id":2,"uid":"cf2eyvzg70ef4a","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"uid":"ef2eziot3emf4f","name":"operator-new","login":"sa-1-operator-new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000} logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | ++ echo '{"totalCount":2,"serviceAccounts":[{"id":2,"uid":"cf2eyvzg70ef4a","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"uid":"ef2eziot3emf4f","name":"operator-new","login":"sa-1-operator-new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000} logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | ++ tail -n1 logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | + service_accounts_status=200 logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | ++ echo 
'{"totalCount":2,"serviceAccounts":[{"id":2,"uid":"cf2eyvzg70ef4a","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"uid":"ef2eziot3emf4f","name":"operator-new","login":"sa-1-operator-new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000} logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | ++ /usr/sbin/sed '$ d' logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | + service_accounts_json='{"totalCount":2,"serviceAccounts":[{"id":2,"uid":"cf2eyvzg70ef4a","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"uid":"ef2eziot3emf4f","name":"operator-new","login":"sa-1-operator-new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000}' logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | + [[ 200 -ne 200 ]] logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | + local service_account_id logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | ++ echo '{"totalCount":2,"serviceAccounts":[{"id":2,"uid":"cf2eyvzg70ef4a","name":"operator","login":"sa-1-operator","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/f741e7abdd8def4ed30a37f602429df3"},{"id":3,"uid":"ef2eziot3emf4f","name":"operator-new","login":"sa-1-operator-new","orgId":1,"isDisabled":false,"role":"Admin","tokens":1,"avatarUrl":"/graph/avatar/6d1b2db67a8f3e06db4ae076406fece5"}],"page":1,"perPage":1000}' logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | ++ jq -r '.serviceAccounts[] | select(.name == "operator").id' logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | + service_account_id=2 logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | + [[ -z 2 ]] logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | + [[ 2 == \n\u\l\l ]] logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | + local tokens_response tokens_status tokens_json logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | +++ get_service_ip monitoring-service logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | +++ local service=monitoring-service logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.spec.type}' logger.go:42: 13:12:30 | monitoring/3-rotate-pmm-token | +++ grep -q NotFound logger.go:42: 13:12:31 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.spec.type}' logger.go:42: 13:12:31 | monitoring/3-rotate-pmm-token | +++ '[' LoadBalancer = ClusterIP ']' logger.go:42: 13:12:31 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[]}' logger.go:42: 13:12:31 | monitoring/3-rotate-pmm-token | +++ egrep -q 'hostname|ip' logger.go:42: 13:12:31 | monitoring/3-rotate-pmm-token | egrep: warning: egrep is obsolescent; using grep -E logger.go:42: 13:12:32 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[].ip}' logger.go:42: 13:12:32 | monitoring/3-rotate-pmm-token | +++ kubectl get 
service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' logger.go:42: 13:12:32 | monitoring/3-rotate-pmm-token | ++ curl --insecure -s -X GET --user 'admin:^gl4}Q%QsB/#XZy>' https://34.58.226.20/graph/api/serviceaccounts/2/tokens -w '\n%{http_code}' logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | + tokens_response='[{"id":1,"name":"operator","created":"2025-10-28T13:05:22Z","lastUsedAt":"2025-10-28T13:10:53Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}] logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | ++ echo '[{"id":1,"name":"operator","created":"2025-10-28T13:05:22Z","lastUsedAt":"2025-10-28T13:10:53Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}] logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | ++ tail -n1 logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | + tokens_status=200 logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | ++ echo '[{"id":1,"name":"operator","created":"2025-10-28T13:05:22Z","lastUsedAt":"2025-10-28T13:10:53Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}] logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | ++ /usr/sbin/sed '$ d' logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | + tokens_json='[{"id":1,"name":"operator","created":"2025-10-28T13:05:22Z","lastUsedAt":"2025-10-28T13:10:53Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}]' logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | + [[ 200 -ne 200 ]] logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | + local token_id logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | ++ jq -r '.[] | select(.name == "operator").id' logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | ++ echo '[{"id":1,"name":"operator","created":"2025-10-28T13:05:22Z","lastUsedAt":"2025-10-28T13:10:53Z","expiration":null,"secondsUntilExpiration":0,"hasExpired":false,"isRevoked":false}]' logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | + token_id=1 logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | + [[ -z 1 ]] logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | + [[ 1 == \n\u\l\l ]] logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | + local delete_response delete_status logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | +++ get_service_ip monitoring-service logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | +++ local service=monitoring-service logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | +++ grep -q NotFound logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.spec.type}' logger.go:42: 13:12:33 | monitoring/3-rotate-pmm-token | ++++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.spec.type}' logger.go:42: 13:12:34 | monitoring/3-rotate-pmm-token | +++ '[' LoadBalancer = ClusterIP ']' logger.go:42: 13:12:34 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[]}' logger.go:42: 13:12:34 | monitoring/3-rotate-pmm-token | +++ egrep -q 'hostname|ip' logger.go:42: 13:12:34 | monitoring/3-rotate-pmm-token | egrep: warning: egrep is 
obsolescent; using grep -E logger.go:42: 13:12:34 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[].ip}' logger.go:42: 13:12:35 | monitoring/3-rotate-pmm-token | +++ kubectl get service/monitoring-service -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' logger.go:42: 13:12:35 | monitoring/3-rotate-pmm-token | ++ curl --insecure -s -X DELETE --user 'admin:^gl4}Q%QsB/#XZy>' https://34.58.226.20/graph/api/serviceaccounts/2/tokens/1 -w '\n%{http_code}' logger.go:42: 13:12:35 | monitoring/3-rotate-pmm-token | + delete_response='{"message":"Service account token deleted"} logger.go:42: 13:12:35 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 13:12:35 | monitoring/3-rotate-pmm-token | ++ echo '{"message":"Service account token deleted"} logger.go:42: 13:12:35 | monitoring/3-rotate-pmm-token | 200' logger.go:42: 13:12:35 | monitoring/3-rotate-pmm-token | ++ tail -n1 logger.go:42: 13:12:35 | monitoring/3-rotate-pmm-token | + delete_status=200 logger.go:42: 13:12:35 | monitoring/3-rotate-pmm-token | + [[ 200 -ne 200 ]] logger.go:42: 13:12:35 | monitoring/3-rotate-pmm-token | + sleep 10 logger.go:42: 13:12:45 | monitoring/3-rotate-pmm-token | + wait_for_generation sts/monitoring-mysql 2 logger.go:42: 13:12:45 | monitoring/3-rotate-pmm-token | + local resource=sts/monitoring-mysql logger.go:42: 13:12:45 | monitoring/3-rotate-pmm-token | + local target_generation=2 logger.go:42: 13:12:45 | monitoring/3-rotate-pmm-token | + echo 'Waiting for sts/monitoring-mysql to reach generation 2...' logger.go:42: 13:12:45 | monitoring/3-rotate-pmm-token | Waiting for sts/monitoring-mysql to reach generation 2... logger.go:42: 13:12:45 | monitoring/3-rotate-pmm-token | + true logger.go:42: 13:12:45 | monitoring/3-rotate-pmm-token | ++ kubectl -n kuttl-test-definite-hare get sts/monitoring-mysql -o 'jsonpath={.metadata.generation}' logger.go:42: 13:12:46 | monitoring/3-rotate-pmm-token | + current_generation=2 logger.go:42: 13:12:46 | monitoring/3-rotate-pmm-token | + '[' 2 -eq 2 ']' logger.go:42: 13:12:46 | monitoring/3-rotate-pmm-token | + echo 'Resource sts/monitoring-mysql has reached generation 2.' logger.go:42: 13:12:46 | monitoring/3-rotate-pmm-token | Resource sts/monitoring-mysql has reached generation 2. logger.go:42: 13:12:46 | monitoring/3-rotate-pmm-token | + break logger.go:42: 13:12:46 | monitoring/3-rotate-pmm-token | + wait_for_generation sts/monitoring-haproxy 2 logger.go:42: 13:12:46 | monitoring/3-rotate-pmm-token | + local resource=sts/monitoring-haproxy logger.go:42: 13:12:46 | monitoring/3-rotate-pmm-token | + local target_generation=2 logger.go:42: 13:12:46 | monitoring/3-rotate-pmm-token | + echo 'Waiting for sts/monitoring-haproxy to reach generation 2...' logger.go:42: 13:12:46 | monitoring/3-rotate-pmm-token | Waiting for sts/monitoring-haproxy to reach generation 2... logger.go:42: 13:12:46 | monitoring/3-rotate-pmm-token | + true logger.go:42: 13:12:46 | monitoring/3-rotate-pmm-token | ++ kubectl -n kuttl-test-definite-hare get sts/monitoring-haproxy -o 'jsonpath={.metadata.generation}' logger.go:42: 13:12:47 | monitoring/3-rotate-pmm-token | + current_generation=2 logger.go:42: 13:12:47 | monitoring/3-rotate-pmm-token | + '[' 2 -eq 2 ']' logger.go:42: 13:12:47 | monitoring/3-rotate-pmm-token | + echo 'Resource sts/monitoring-haproxy has reached generation 2.' 
logger.go:42: 13:12:47 | monitoring/3-rotate-pmm-token | Resource sts/monitoring-haproxy has reached generation 2. logger.go:42: 13:12:47 | monitoring/3-rotate-pmm-token | + break logger.go:42: 13:16:08 | monitoring/3-rotate-pmm-token | test step completed 3-rotate-pmm-token logger.go:42: 13:16:08 | monitoring/4-check-metrics | starting test step 4-check-metrics logger.go:42: 13:16:08 | monitoring/4-check-metrics | running command: [sh -c set -o errexit set -o xtrace source ../../functions sleep 70 # we should wait more than one minute because `get_metric_values` gets data for the last 60 seconds TOKEN=$(kubectl get secret internal-monitoring -o jsonpath='{.data.pmmservertoken}' -n "${NAMESPACE}" | base64 --decode) for i in $(seq 0 2); do get_metric_values node_boot_time_seconds ${NAMESPACE}-$(get_cluster_name)-mysql-${i} $TOKEN get_metric_values mysql_global_status_uptime ${NAMESPACE}-$(get_cluster_name)-mysql-${i} $TOKEN done sleep 90 # wait for QAN get_qan20_values monitoring-mysql-0 $TOKEN haproxy_svc=$(get_service_ip "monitoring-haproxy") http_code=$(curl -s -o /dev/null -w "%{http_code}" http://${haproxy_svc}:8404/metrics) if [[ $http_code != 200 ]]; then echo "Error: http code is $http_code" exit 1 fi] logger.go:42: 13:16:08 | monitoring/4-check-metrics | + source ../../functions logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ realpath ../../.. logger.go:42: 13:16:08 | monitoring/4-check-metrics | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125 logger.go:42: 13:16:08 | monitoring/4-check-metrics | ++++ pwd logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/tests/monitoring logger.go:42: 13:16:08 | monitoring/4-check-metrics | ++ test_name=monitoring logger.go:42: 13:16:08 | monitoring/4-check-metrics | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/vars.sh logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125 logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125 logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/deploy logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/deploy logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/conf logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/conf logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 13:16:08 | monitoring/4-check-metrics | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export GIT_BRANCH=PR-1125 logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ GIT_BRANCH=PR-1125 logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export VERSION=PR-1125-703ecc3e logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ 
VERSION=PR-1125-703ecc3e logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1125-703ecc3e logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1125-703ecc3e logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ [[ -z 8.4 ]] logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export MYSQL_VERSION=8.4 logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ MYSQL_VERSION=8.4 logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export CERT_MANAGER_VER=1.18.2 logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ CERT_MANAGER_VER=1.18.2 logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export MINIO_VER=5.4.0 logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ MINIO_VER=5.4.0 logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ export VAULT_VER=0.16.1 logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ VAULT_VER=0.16.1 logger.go:42: 13:16:08 | monitoring/4-check-metrics | ++++ which gdate logger.go:42: 13:16:08 | monitoring/4-check-metrics | which: no gdate in 
(/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 13:16:08 | monitoring/4-check-metrics | ++++ which date logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ date=/usr/sbin/date logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ oc get projects logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ : logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ kubectl get nodes logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ grep '^minikube' logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ which gsed logger.go:42: 13:16:08 | monitoring/4-check-metrics | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ which sed logger.go:42: 13:16:08 | monitoring/4-check-metrics | ++ sed=/usr/sbin/sed logger.go:42: 13:16:08 | monitoring/4-check-metrics | ++ oc get projects logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ kubectl version -o json logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ grep '\-eks\-' logger.go:42: 13:16:08 | monitoring/4-check-metrics | +++ jq -r .serverVersion.gitVersion logger.go:42: 13:16:08 | monitoring/4-check-metrics | grep: warning: stray \ before - logger.go:42: 13:16:09 | monitoring/4-check-metrics | Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 logger.go:42: 13:16:09 | monitoring/4-check-metrics | ++ '[' ']' logger.go:42: 13:16:09 | monitoring/4-check-metrics | ++ EKS=0 logger.go:42: 13:16:09 | monitoring/4-check-metrics | + sleep 70 logger.go:42: 13:17:19 | monitoring/4-check-metrics | ++ kubectl get secret internal-monitoring -o 'jsonpath={.data.pmmservertoken}' -n kuttl-test-definite-hare logger.go:42: 13:17:19 | monitoring/4-check-metrics | ++ base64 --decode logger.go:42: 13:17:19 | monitoring/4-check-metrics | + TOKEN=glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496 logger.go:42: 13:17:19 | monitoring/4-check-metrics | ++ seq 0 2 logger.go:42: 13:17:19 | monitoring/4-check-metrics | + for i in $(seq 0 2) logger.go:42: 13:17:19 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 13:17:19 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-definite-hare get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 13:17:20 | monitoring/4-check-metrics | + get_metric_values node_boot_time_seconds kuttl-test-definite-hare-monitoring-mysql-0 glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496 logger.go:42: 13:17:20 | monitoring/4-check-metrics | + local metric=node_boot_time_seconds logger.go:42: 13:17:20 | monitoring/4-check-metrics | + local instance=kuttl-test-definite-hare-monitoring-mysql-0 logger.go:42: 13:17:20 | monitoring/4-check-metrics | + local token=glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496 logger.go:42: 13:17:20 | monitoring/4-check-metrics | ++ /usr/sbin/date -u +%s -d '-1 minute' logger.go:42: 13:17:20 | monitoring/4-check-metrics | + local start=1761657380 logger.go:42: 13:17:20 | monitoring/4-check-metrics | ++ /usr/sbin/date -u +%s logger.go:42: 13:17:20 | monitoring/4-check-metrics | + local end=1761657440 logger.go:42: 13:17:20 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 13:17:21 | monitoring/4-check-metrics | "1761653620" logger.go:42: 13:17:21 | monitoring/4-check-metrics | "1761653620" logger.go:42: 
13:17:21 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 13:17:21 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-definite-hare get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 13:17:21 | monitoring/4-check-metrics | + get_metric_values mysql_global_status_uptime kuttl-test-definite-hare-monitoring-mysql-0 glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496 logger.go:42: 13:17:21 | monitoring/4-check-metrics | + local metric=mysql_global_status_uptime logger.go:42: 13:17:21 | monitoring/4-check-metrics | + local instance=kuttl-test-definite-hare-monitoring-mysql-0 logger.go:42: 13:17:21 | monitoring/4-check-metrics | + local token=glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496 logger.go:42: 13:17:21 | monitoring/4-check-metrics | ++ /usr/sbin/date -u +%s -d '-1 minute' logger.go:42: 13:17:21 | monitoring/4-check-metrics | + local start=1761657381 logger.go:42: 13:17:21 | monitoring/4-check-metrics | ++ /usr/sbin/date -u +%s logger.go:42: 13:17:21 | monitoring/4-check-metrics | + local end=1761657441 logger.go:42: 13:17:21 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 13:17:22 | monitoring/4-check-metrics | "382" logger.go:42: 13:17:22 | monitoring/4-check-metrics | "101" logger.go:42: 13:17:22 | monitoring/4-check-metrics | + for i in $(seq 0 2) logger.go:42: 13:17:22 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 13:17:22 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-definite-hare get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 13:17:23 | monitoring/4-check-metrics | + get_metric_values node_boot_time_seconds kuttl-test-definite-hare-monitoring-mysql-1 glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496 logger.go:42: 13:17:23 | monitoring/4-check-metrics | + local metric=node_boot_time_seconds logger.go:42: 13:17:23 | monitoring/4-check-metrics | + local instance=kuttl-test-definite-hare-monitoring-mysql-1 logger.go:42: 13:17:23 | monitoring/4-check-metrics | + local token=glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496 logger.go:42: 13:17:23 | monitoring/4-check-metrics | ++ /usr/sbin/date -u +%s -d '-1 minute' logger.go:42: 13:17:23 | monitoring/4-check-metrics | + local start=1761657383 logger.go:42: 13:17:23 | monitoring/4-check-metrics | ++ /usr/sbin/date -u +%s logger.go:42: 13:17:23 | monitoring/4-check-metrics | + local end=1761657443 logger.go:42: 13:17:23 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 13:17:24 | monitoring/4-check-metrics | "1761653607" logger.go:42: 13:17:24 | monitoring/4-check-metrics | "1761653607" logger.go:42: 13:17:24 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 13:17:24 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-definite-hare get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 13:17:24 | monitoring/4-check-metrics | + get_metric_values mysql_global_status_uptime kuttl-test-definite-hare-monitoring-mysql-1 glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496 logger.go:42: 13:17:24 | monitoring/4-check-metrics | + local metric=mysql_global_status_uptime logger.go:42: 13:17:24 | monitoring/4-check-metrics | + local instance=kuttl-test-definite-hare-monitoring-mysql-1 logger.go:42: 13:17:24 | monitoring/4-check-metrics | + local token=glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496 logger.go:42: 13:17:24 | monitoring/4-check-metrics | ++ /usr/sbin/date -u +%s -d '-1 minute' logger.go:42: 13:17:24 | monitoring/4-check-metrics | + local start=1761657384 logger.go:42: 13:17:24 | monitoring/4-check-metrics | ++ /usr/sbin/date -u +%s 
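
get_metric_values itself runs under set +o xtrace, so only its setup is logged: a query window covering the last 60 seconds, built with date -u +%s. The sketch below is a hypothetical reconstruction that would yield the quoted values; the /prometheus/api/v1/query_range path, the Bearer auth, and the node_name label are all assumptions, since the actual request never appears in this log:

# Hypothetical: only the window arithmetic is visible in the trace above.
get_metric_values() {
    local metric=$1 instance=$2 token=$3
    local start end
    start=$(date -u +%s -d '-1 minute')
    end=$(date -u +%s)

    curl --insecure -s -G \
        -H "Authorization: Bearer ${token}" \
        --data-urlencode "query=${metric}{node_name=\"${instance}\"}" \
        --data-urlencode "start=${start}" \
        --data-urlencode "end=${end}" \
        --data-urlencode "step=60" \
        "https://$(get_service_ip monitoring-service)/prometheus/api/v1/query_range" \
        | jq '.data.result[0].values[][1]'
}

Whatever the exact endpoint, the assertion is the same: node_boot_time_seconds proves node_exporter data is flowing for each pod, and mysql_global_status_uptime proves mysqld exporter data is, for mysql-0 through mysql-2.
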
logger.go:42: 13:17:24 | monitoring/4-check-metrics | + local end=1761657444 logger.go:42: 13:17:24 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 13:17:25 | monitoring/4-check-metrics | "325" logger.go:42: 13:17:25 | monitoring/4-check-metrics | "187" logger.go:42: 13:17:25 | monitoring/4-check-metrics | + for i in $(seq 0 2) logger.go:42: 13:17:25 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 13:17:25 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-definite-hare get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 13:17:26 | monitoring/4-check-metrics | + get_metric_values node_boot_time_seconds kuttl-test-definite-hare-monitoring-mysql-2 glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496 logger.go:42: 13:17:26 | monitoring/4-check-metrics | + local metric=node_boot_time_seconds logger.go:42: 13:17:26 | monitoring/4-check-metrics | + local instance=kuttl-test-definite-hare-monitoring-mysql-2 logger.go:42: 13:17:26 | monitoring/4-check-metrics | + local token=glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496 logger.go:42: 13:17:26 | monitoring/4-check-metrics | ++ /usr/sbin/date -u +%s -d '-1 minute' logger.go:42: 13:17:26 | monitoring/4-check-metrics | + local start=1761657386 logger.go:42: 13:17:26 | monitoring/4-check-metrics | ++ /usr/sbin/date -u +%s logger.go:42: 13:17:26 | monitoring/4-check-metrics | + local end=1761657446 logger.go:42: 13:17:26 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 13:17:27 | monitoring/4-check-metrics | "1761653606" logger.go:42: 13:17:27 | monitoring/4-check-metrics | "1761653606" logger.go:42: 13:17:27 | monitoring/4-check-metrics | ++ get_cluster_name logger.go:42: 13:17:27 | monitoring/4-check-metrics | ++ kubectl -n kuttl-test-definite-hare get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 13:17:27 | monitoring/4-check-metrics | + get_metric_values mysql_global_status_uptime kuttl-test-definite-hare-monitoring-mysql-2 glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496 logger.go:42: 13:17:27 | monitoring/4-check-metrics | + local metric=mysql_global_status_uptime logger.go:42: 13:17:27 | monitoring/4-check-metrics | + local instance=kuttl-test-definite-hare-monitoring-mysql-2 logger.go:42: 13:17:27 | monitoring/4-check-metrics | + local token=glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496 logger.go:42: 13:17:27 | monitoring/4-check-metrics | ++ /usr/sbin/date -u +%s -d '-1 minute' logger.go:42: 13:17:27 | monitoring/4-check-metrics | + local start=1761657387 logger.go:42: 13:17:27 | monitoring/4-check-metrics | ++ /usr/sbin/date -u +%s logger.go:42: 13:17:27 | monitoring/4-check-metrics | + local end=1761657447 logger.go:42: 13:17:27 | monitoring/4-check-metrics | + set +o xtrace logger.go:42: 13:17:28 | monitoring/4-check-metrics | "42" logger.go:42: 13:17:28 | monitoring/4-check-metrics | "47" logger.go:42: 13:17:28 | monitoring/4-check-metrics | + sleep 90 logger.go:42: 13:18:58 | monitoring/4-check-metrics | + get_qan20_values monitoring-mysql-0 glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496 logger.go:42: 13:18:58 | monitoring/4-check-metrics | + local instance=monitoring-mysql-0 logger.go:42: 13:18:58 | monitoring/4-check-metrics | + local token=glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496 logger.go:42: 13:18:58 | monitoring/4-check-metrics | ++ /usr/sbin/date -u +%Y-%m-%dT%H:%M:%S -d '-30 minute' logger.go:42: 13:18:58 | monitoring/4-check-metrics | + local start=2025-10-28T12:48:58 logger.go:42: 13:18:58 | monitoring/4-check-metrics | ++ /usr/sbin/date -u +%Y-%m-%dT%H:%M:%S 
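
get_qan20_values, whose payload and curl are traced below, asks PMM's Query Analytics for the top ten queries by load over the last 12 hours and prints their fingerprints. The same call, condensed, with the heredoc payload rebuilt via jq -cn (first_seen, include_only_fields, keyword, and offset are dropped here on the assumption that the server defaults them):

get_qan20_values() {
    local token=$1
    local payload
    payload=$(jq -cn \
        --arg from "$(date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z)" \
        --arg to "$(date -u +%Y-%m-%dT%H:%M:%S%:z)" \
        '{columns: ["load", "num_queries", "query_time"],
          group_by: "queryid",
          labels: [{key: "cluster", value: ["monitoring"]}],
          limit: 10, order_by: "-load", main_metric: "load",
          period_start_from: $from, period_start_to: $to}')

    # run from the mysql-client pod so the in-cluster service DNS resolves
    kubectl -n "${NAMESPACE}" exec mysql-client -- bash -c \
        "curl -s -k -XPOST -d '${payload}' \
            -H 'Authorization: Bearer ${token}' \
            https://@monitoring-service/v1/qan/metrics:getReport" \
        | jq '.rows[].fingerprint'
}

The fingerprint list that follows (heartbeat REPLACEs, SHOW GLOBAL STATUS, performance_schema reads) is mostly the monitoring stack's own workload, which is enough to prove QAN ingestion works.
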
logger.go:42: 13:18:58 | monitoring/4-check-metrics | + local end=2025-10-28T13:18:58 logger.go:42: 13:18:58 | monitoring/4-check-metrics | + local endpoint=monitoring-service logger.go:42: 13:18:58 | monitoring/4-check-metrics | ++ cat logger.go:42: 13:18:58 | monitoring/4-check-metrics | +++ /usr/sbin/date -u -d '-12 hour' +%Y-%m-%dT%H:%M:%S%:z logger.go:42: 13:18:58 | monitoring/4-check-metrics | +++ /usr/sbin/date -u +%Y-%m-%dT%H:%M:%S%:z logger.go:42: 13:18:58 | monitoring/4-check-metrics | + local 'payload={ logger.go:42: 13:18:58 | monitoring/4-check-metrics | "columns":[ logger.go:42: 13:18:58 | monitoring/4-check-metrics | "load", logger.go:42: 13:18:58 | monitoring/4-check-metrics | "num_queries", logger.go:42: 13:18:58 | monitoring/4-check-metrics | "query_time" logger.go:42: 13:18:58 | monitoring/4-check-metrics | ], logger.go:42: 13:18:58 | monitoring/4-check-metrics | "first_seen": false, logger.go:42: 13:18:58 | monitoring/4-check-metrics | "group_by": "queryid", logger.go:42: 13:18:58 | monitoring/4-check-metrics | "include_only_fields": [], logger.go:42: 13:18:58 | monitoring/4-check-metrics | "keyword": "", logger.go:42: 13:18:58 | monitoring/4-check-metrics | "labels": [ logger.go:42: 13:18:58 | monitoring/4-check-metrics | { logger.go:42: 13:18:58 | monitoring/4-check-metrics | "key": "cluster", logger.go:42: 13:18:58 | monitoring/4-check-metrics | "value": ["monitoring"] logger.go:42: 13:18:58 | monitoring/4-check-metrics | }], logger.go:42: 13:18:58 | monitoring/4-check-metrics | "limit": 10, logger.go:42: 13:18:58 | monitoring/4-check-metrics | "offset": 0, logger.go:42: 13:18:58 | monitoring/4-check-metrics | "order_by": "-load", logger.go:42: 13:18:58 | monitoring/4-check-metrics | "main_metric": "load", logger.go:42: 13:18:58 | monitoring/4-check-metrics | "period_start_from": "2025-10-28T01:18:58+00:00", logger.go:42: 13:18:58 | monitoring/4-check-metrics | "period_start_to": "2025-10-28T13:18:58+00:00" logger.go:42: 13:18:58 | monitoring/4-check-metrics | }' logger.go:42: 13:18:58 | monitoring/4-check-metrics | + jq '.rows[].fingerprint' logger.go:42: 13:18:58 | monitoring/4-check-metrics | ++ /usr/sbin/sed 's/\n//g' logger.go:42: 13:18:58 | monitoring/4-check-metrics | ++ echo '{' '"columns":[' '"load",' '"num_queries",' '"query_time"' '],' '"first_seen":' false, '"group_by":' '"queryid",' '"include_only_fields":' '[],' '"keyword":' '"",' '"labels":' '[' '{' '"key":' '"cluster",' '"value":' '["monitoring"]' '}],' '"limit":' 10, '"offset":' 0, '"order_by":' '"-load",' '"main_metric":' '"load",' '"period_start_from":' '"2025-10-28T01:18:58+00:00",' '"period_start_to":' '"2025-10-28T13:18:58+00:00"' '}' logger.go:42: 13:18:58 | monitoring/4-check-metrics | + run_curl -XPOST -d ''\''{ "columns":[ "load", "num_queries", "query_time" ], "first_seen": false, "group_by": "queryid", "include_only_fields": [], "keyword": "", "labels": [ { "key": "cluster", "value": ["monitoring"] }], "limit": 10, "offset": 0, "order_by": "-load", "main_metric": "load", "period_start_from": "2025-10-28T01:18:58+00:00", "period_start_to": "2025-10-28T13:18:58+00:00" }'\''' '-H '\''Authorization: Bearer glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496'\''' https://@monitoring-service/v1/qan/metrics:getReport logger.go:42: 13:18:58 | monitoring/4-check-metrics | + kubectl -n kuttl-test-definite-hare exec mysql-client -- bash -c 'curl -s -k -XPOST -d '\''{ "columns":[ "load", "num_queries", "query_time" ], "first_seen": false, "group_by": "queryid", "include_only_fields": [], "keyword": "", 
"labels": [ { "key": "cluster", "value": ["monitoring"] }], "limit": 10, "offset": 0, "order_by": "-load", "main_metric": "load", "period_start_from": "2025-10-28T01:18:58+00:00", "period_start_to": "2025-10-28T13:18:58+00:00" }'\'' -H '\''Authorization: Bearer glsa_VNPhL92lbwPDJ07fGE9T1HGF2rjdIcxI_95abb496'\'' https://@monitoring-service/v1/qan/metrics:getReport' logger.go:42: 13:19:00 | monitoring/4-check-metrics | "TOTAL" logger.go:42: 13:19:00 | monitoring/4-check-metrics | "SELECT `EVENT_NAME` , `COUNT_STAR` , `SUM_TIMER_WAIT` FROM `performance_schema` . `events_waits_summary_global_by_event_name`" logger.go:42: 13:19:00 | monitoring/4-check-metrics | "REPLACE INTO `sys_operator` . `heartbeat` ( `ts` , `server_id` , FILE , `position` , `relay_source_log_file` , `exec_source_log_pos` ) VALUES (...)" logger.go:42: 13:19:00 | monitoring/4-check-metrics | "SHOW GLOBAL VARIABLES LIKE ?" logger.go:42: 13:19:00 | monitoring/4-check-metrics | "SHOW GLOBAL STATUS LIKE ?" logger.go:42: 13:19:00 | monitoring/4-check-metrics | "SELECT `c` . `table_schema` , `c` . `table_name` , COLUMN_NAME , AUTO_INCREMENT , `pow` ( ? , CASE `data_type` WHEN ? THEN ? WHEN ? THEN ? WHEN ? THEN ? WHEN ? THEN ? WHEN ? THEN ? END + ( `column_type` LIKE ? ) ) - ? AS `max_int` FROM `information_schema` . `columns` `c` STRAIGHT_JOIN `information_schema` . `tables` `t` ON ( BINARY `c` . `table_schema` = `t` . `table_schema` AND BINARY `c` . `table_name` = `t` . `table_name` ) WHERE `c` . `extra` = ? AND `t` . `auto_increment` IS NOT NULL" logger.go:42: 13:19:00 | monitoring/4-check-metrics | "SHOW GLOBAL STATUS" logger.go:42: 13:19:00 | monitoring/4-check-metrics | "SELECT COLUMN_NAME FROM `information_schema` . `columns` WHERE `table_schema` = ? AND TABLE_NAME = ? AND COLUMN_NAME IN (...) LIMIT ?" logger.go:42: 13:19:00 | monitoring/4-check-metrics | "SELECT SYSTEM_USER , `substring_index` ( HOST , ?, ... ) AS `slave_hostname` FROM `information_schema` . `processlist` WHERE `command` IN (...)" logger.go:42: 13:19:00 | monitoring/4-check-metrics | "SELECT COUNT ( * ) > ? AND MAX ( `User_name` ) != ? FROM `mysql` . `slave_master_info`" logger.go:42: 13:19:00 | monitoring/4-check-metrics | "SELECT `performance_schema` . `events_statements_history` . `SQL_TEXT` , `performance_schema` . `events_statements_history` . `DIGEST` , `performance_schema` . `events_statements_history` . `DIGEST_TEXT` , `performance_schema` . `events_statements_history` . `CURRENT_SCHEMA` FROM `performance_schema` . 
`events_statements_history` WHERE `DIGEST` IS NOT NULL AND `SQL_TEXT` IS NOT NULL AND `DIGEST_TEXT` IS NOT NULL" logger.go:42: 13:19:00 | monitoring/4-check-metrics | ++ get_service_ip monitoring-haproxy logger.go:42: 13:19:00 | monitoring/4-check-metrics | ++ local service=monitoring-haproxy logger.go:42: 13:19:00 | monitoring/4-check-metrics | ++ kubectl get service/monitoring-haproxy -n kuttl-test-definite-hare -o 'jsonpath={.spec.type}' logger.go:42: 13:19:00 | monitoring/4-check-metrics | ++ grep -q NotFound logger.go:42: 13:19:01 | monitoring/4-check-metrics | +++ kubectl get service/monitoring-haproxy -n kuttl-test-definite-hare -o 'jsonpath={.spec.type}' logger.go:42: 13:19:01 | monitoring/4-check-metrics | ++ '[' LoadBalancer = ClusterIP ']' logger.go:42: 13:19:01 | monitoring/4-check-metrics | ++ kubectl get service/monitoring-haproxy -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[]}' logger.go:42: 13:19:01 | monitoring/4-check-metrics | ++ egrep -q 'hostname|ip' logger.go:42: 13:19:01 | monitoring/4-check-metrics | egrep: warning: egrep is obsolescent; using grep -E logger.go:42: 13:19:01 | monitoring/4-check-metrics | ++ kubectl get service/monitoring-haproxy -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[].ip}' logger.go:42: 13:19:02 | monitoring/4-check-metrics | ++ kubectl get service/monitoring-haproxy -n kuttl-test-definite-hare -o 'jsonpath={.status.loadBalancer.ingress[].hostname}' logger.go:42: 13:19:02 | monitoring/4-check-metrics | + haproxy_svc=34.72.31.22 logger.go:42: 13:19:02 | monitoring/4-check-metrics | ++ curl -s -o /dev/null -w '%{http_code}' http://34.72.31.22:8404/metrics logger.go:42: 13:19:03 | monitoring/4-check-metrics | + http_code=200 logger.go:42: 13:19:03 | monitoring/4-check-metrics | + [[ 200 != 200 ]] logger.go:42: 13:19:03 | monitoring/4-check-metrics | test step completed 4-check-metrics logger.go:42: 13:19:03 | monitoring/5-check-password-leak | starting test step 5-check-password-leak logger.go:42: 13:19:03 | monitoring/5-check-password-leak | running command: [sh -c set -o errexit set -o xtrace source ../../functions # Temporarily skipping this check # check_passwords_leak] logger.go:42: 13:19:03 | monitoring/5-check-password-leak | + source ../../functions logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ realpath ../../.. 
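
The last assertion of 4-check-metrics, completed just above, bypasses PMM entirely and probes HAProxy's built-in stats endpoint over the LoadBalancer IP. Standalone, it is exactly the tail of the step script quoted earlier:

haproxy_svc=$(get_service_ip "monitoring-haproxy")
# HAProxy serves Prometheus-format metrics on its stats port, 8404
http_code=$(curl -s -o /dev/null -w "%{http_code}" "http://${haproxy_svc}:8404/metrics")
if [[ $http_code != 200 ]]; then
    echo "Error: http code is $http_code"
    exit 1
fi

Here it resolved to http://34.72.31.22:8404/metrics and got a 200, so the step passed; 5-check-password-leak, which begins below, is currently a no-op with check_passwords_leak commented out.
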
logger.go:42: 13:19:03 | monitoring/5-check-password-leak | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | ++++ pwd logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/tests/monitoring logger.go:42: 13:19:03 | monitoring/5-check-password-leak | ++ test_name=monitoring logger.go:42: 13:19:03 | monitoring/5-check-password-leak | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/vars.sh logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/deploy logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/deploy logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/conf logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/conf logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 13:19:03 | monitoring/5-check-password-leak | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export GIT_BRANCH=PR-1125 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ GIT_BRANCH=PR-1125 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export VERSION=PR-1125-703ecc3e logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ VERSION=PR-1125-703ecc3e logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1125-703ecc3e logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1125-703ecc3e logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ [[ -z 8.4 ]] logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export MYSQL_VERSION=8.4 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ MYSQL_VERSION=8.4 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 13:19:03 | 
monitoring/5-check-password-leak | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export CERT_MANAGER_VER=1.18.2 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ CERT_MANAGER_VER=1.18.2 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export MINIO_VER=5.4.0 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ MINIO_VER=5.4.0 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ export VAULT_VER=0.16.1 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ VAULT_VER=0.16.1 logger.go:42: 13:19:03 | monitoring/5-check-password-leak | ++++ which gdate logger.go:42: 13:19:03 | monitoring/5-check-password-leak | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 13:19:03 | monitoring/5-check-password-leak | ++++ which date logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ date=/usr/sbin/date logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ oc get projects logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ : logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ kubectl get nodes logger.go:42: 13:19:03 | monitoring/5-check-password-leak | +++ grep '^minikube' logger.go:42: 13:19:04 | monitoring/5-check-password-leak | +++ which gsed logger.go:42: 13:19:04 | monitoring/5-check-password-leak | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 13:19:04 | monitoring/5-check-password-leak | +++ which sed logger.go:42: 13:19:04 | monitoring/5-check-password-leak | ++ sed=/usr/sbin/sed logger.go:42: 
13:19:04 | monitoring/5-check-password-leak | ++ oc get projects logger.go:42: 13:19:04 | monitoring/5-check-password-leak | +++ kubectl version -o json logger.go:42: 13:19:04 | monitoring/5-check-password-leak | +++ jq -r .serverVersion.gitVersion logger.go:42: 13:19:04 | monitoring/5-check-password-leak | +++ grep '\-eks\-' logger.go:42: 13:19:04 | monitoring/5-check-password-leak | grep: warning: stray \ before - logger.go:42: 13:19:04 | monitoring/5-check-password-leak | Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 logger.go:42: 13:19:04 | monitoring/5-check-password-leak | ++ '[' ']' logger.go:42: 13:19:04 | monitoring/5-check-password-leak | ++ EKS=0 logger.go:42: 13:19:04 | monitoring/5-check-password-leak | test step completed 5-check-password-leak logger.go:42: 13:19:04 | monitoring/98-drop-finalizer | starting test step 98-drop-finalizer logger.go:42: 13:19:05 | monitoring/98-drop-finalizer | PerconaServerMySQL:kuttl-test-definite-hare/monitoring updated logger.go:42: 13:19:05 | monitoring/98-drop-finalizer | test step completed 98-drop-finalizer logger.go:42: 13:19:05 | monitoring/99-remove-cluster-gracefully | starting test step 99-remove-cluster-gracefully logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_operator] logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | + source ../../functions logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ realpath ../../.. logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | ++++ pwd logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/tests/monitoring logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | ++ test_name=monitoring logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/vars.sh logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/deploy logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/deploy logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/conf logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/e2e-tests/conf logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ export TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ 
TEMP_DIR=/tmp/kuttl/ps/monitoring logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ export GIT_BRANCH=PR-1125 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ GIT_BRANCH=PR-1125 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ export VERSION=PR-1125-703ecc3e logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ VERSION=PR-1125-703ecc3e logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-1125-703ecc3e logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-1125-703ecc3e logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ [[ -z 8.4 ]] logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ export MYSQL_VERSION=8.4 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ MYSQL_VERSION=8.4 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.4 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.4 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.4 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 13:19:06 | 
monitoring/99-remove-cluster-gracefully | +++ export CERT_MANAGER_VER=1.18.2 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ CERT_MANAGER_VER=1.18.2 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ export MINIO_VER=5.4.0 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ MINIO_VER=5.4.0 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ export VAULT_VER=0.16.1 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ VAULT_VER=0.16.1 logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | ++++ which gdate logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | ++++ which date logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ date=/usr/sbin/date logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ oc get projects logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ : logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ kubectl get nodes logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ grep '^minikube' logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ which gsed logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | which: no gsed in (/mnt/jenkins/workspace/cloud-ps-operator_PR-1125/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ which sed logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | ++ sed=/usr/sbin/sed logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | ++ oc get projects logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ kubectl version -o json logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ grep '\-eks\-' logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | +++ jq -r .serverVersion.gitVersion logger.go:42: 13:19:06 | monitoring/99-remove-cluster-gracefully | grep: warning: stray \ before - logger.go:42: 13:19:07 | monitoring/99-remove-cluster-gracefully | Warning: version difference between client (1.34) and server (1.31) exceeds the supported minor version skew of +/-1 logger.go:42: 13:19:07 | monitoring/99-remove-cluster-gracefully | ++ '[' ']' logger.go:42: 13:19:07 | monitoring/99-remove-cluster-gracefully | ++ EKS=0 logger.go:42: 13:19:07 | monitoring/99-remove-cluster-gracefully | + destroy_operator logger.go:42: 13:19:07 | monitoring/99-remove-cluster-gracefully | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0 logger.go:42: 13:19:07 | monitoring/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. 
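
The teardown traced in this step reduces to two force-deletions: the operator deployment (whose output follows below) and then its namespace. A minimal sketch of destroy_operator, reconstructed from the xtrace; OPERATOR_NS is a hypothetical variable name, since the trace only shows the literal value ps-operator:

    OPERATOR_NS=ps-operator

    destroy_operator() {
        # --force --grace-period=0 removes the API object immediately; as the
        # kubectl warning above notes, the underlying pod may keep running
        # until the kubelet observes the deletion.
        kubectl -n "${OPERATOR_NS}" delete deployment percona-server-mysql-operator \
            --force --grace-period=0 || true
        # Only delete the namespace when the operator runs in its own
        # namespace, mirroring the [[ -n ps-operator ]] guard in the trace.
        if [[ -n ${OPERATOR_NS} ]]; then
            kubectl delete namespace "${OPERATOR_NS}" --force --grace-period=0 || true
        fi
    }
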
logger.go:42: 13:19:07 | monitoring/99-remove-cluster-gracefully | deployment.apps "percona-server-mysql-operator" force deleted from ps-operator namespace logger.go:42: 13:19:08 | monitoring/99-remove-cluster-gracefully | + [[ -n ps-operator ]] logger.go:42: 13:19:08 | monitoring/99-remove-cluster-gracefully | + kubectl delete namespace ps-operator --force --grace-period=0 logger.go:42: 13:19:08 | monitoring/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 13:19:08 | monitoring/99-remove-cluster-gracefully | namespace "ps-operator" force deleted logger.go:42: 13:19:19 | monitoring/99-remove-cluster-gracefully | test step completed 99-remove-cluster-gracefully logger.go:42: 13:19:20 | monitoring | monitoring events from ns kuttl-test-definite-hare: logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:03:02 +0000 UTC Normal Pod mysql-client Binding Scheduled Successfully assigned kuttl-test-definite-hare/mysql-client to gke-jen-ps-1125-703ecc3e-default-pool-fbc52862-vcxq default-scheduler logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:03:02 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Container image "perconalab/percona-server-mysql-operator:main-psmysql8.4" already present on machine kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:03:02 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container: mysql-client kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:03:02 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:03:16 +0000 UTC Normal Service monitoring-service EnsuringLoadBalancer Ensuring load balancer service-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:03:16 +0000 UTC Normal StatefulSet.apps monitoring SuccessfulCreate create Claim pmm-storage-monitoring-0 Pod monitoring-0 in StatefulSet monitoring success statefulset-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:03:16 +0000 UTC Normal StatefulSet.apps monitoring SuccessfulCreate create Pod monitoring-0 in StatefulSet monitoring successful statefulset-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:03:16 +0000 UTC Normal PersistentVolumeClaim pmm-storage-monitoring-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:03:17 +0000 UTC Normal PersistentVolumeClaim pmm-storage-monitoring-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:03:17 +0000 UTC Normal PersistentVolumeClaim pmm-storage-monitoring-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-definite-hare/pmm-storage-monitoring-0" pd.csi.storage.gke.io_gke-f1decfb149be4c3d8254-c288-ecb5-vm_7574c2ad-2eca-4c09-8093-b51ec390293e logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:03:20 +0000 UTC Normal PersistentVolumeClaim pmm-storage-monitoring-0 ProvisioningSucceeded Successfully provisioned volume pvc-beada461-1291-4535-b1f4-40d14e78ec22 pd.csi.storage.gke.io_gke-f1decfb149be4c3d8254-c288-ecb5-vm_7574c2ad-2eca-4c09-8093-b51ec390293e logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:03:21 +0000 UTC Normal Pod monitoring-0 Binding Scheduled Successfully assigned kuttl-test-definite-hare/monitoring-0 to gke-jen-ps-1125-703ecc3e-default-pool-fbc52862-vcxq default-scheduler logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:03:28 +0000 UTC Normal Pod monitoring-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-beada461-1291-4535-b1f4-40d14e78ec22" attachdetach-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:03:29 +0000 UTC Normal Pod monitoring-0.spec.containers{pmm} Pulling Pulling image "perconalab/pmm-server:3-dev-latest" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:03:51 +0000 UTC Normal Pod monitoring-0.spec.containers{pmm} Pulled Successfully pulled image "perconalab/pmm-server:3-dev-latest" in 21.726s (21.726s including waiting). Image size: 894885745 bytes. kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:03:51 +0000 UTC Normal Pod monitoring-0.spec.containers{pmm} Created Created container: pmm kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:03:51 +0000 UTC Normal Pod monitoring-0.spec.containers{pmm} Started Started container pmm kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:03:52 +0000 UTC Warning Pod monitoring-0.spec.containers{pmm} Unhealthy Readiness probe failed: Get "http://10.240.178.36:8080/v1/readyz": dial tcp 10.240.178.36:8080: connect: connection refused kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:03:57 +0000 UTC Normal Service monitoring-service EnsuredLoadBalancer Ensured load balancer service-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:04:04 +0000 UTC Warning Pod monitoring-0.spec.containers{pmm} Unhealthy Readiness probe failed: HTTP probe failed with statuscode: 503 kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:27 +0000 UTC Normal Service monitoring-haproxy EnsuringLoadBalancer Ensuring load balancer service-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:28 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:28 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:28 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-definite-hare/datadir-monitoring-mysql-0" pd.csi.storage.gke.io_gke-f1decfb149be4c3d8254-c288-ecb5-vm_7574c2ad-2eca-4c09-8093-b51ec390293e logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:28 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Claim datadir-monitoring-mysql-0 Pod monitoring-mysql-0 in StatefulSet monitoring-mysql success statefulset-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:28 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Pod monitoring-mysql-0 in StatefulSet monitoring-mysql successful statefulset-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:28 +0000 UTC Normal PodDisruptionBudget.policy monitoring-mysql NoPods No matching pods found controllermanager logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:28 +0000 UTC Warning PerconaServerMySQL.ps.percona.com monitoring ReconcileError Failed to reconcile cluster ps-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:29 +0000 UTC Normal Pod monitoring-orc-0 Binding Scheduled Successfully assigned kuttl-test-definite-hare/monitoring-orc-0 to gke-jen-ps-1125-703ecc3e-default-pool-fbc52862-vcxq default-scheduler logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:29 +0000 UTC Normal Pod monitoring-orc-0.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:29 +0000 UTC Normal Pod monitoring-orc-0.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" in 240ms (240ms including waiting). Image size: 109944390 bytes. 
kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:29 +0000 UTC Normal Pod monitoring-orc-0.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:29 +0000 UTC Normal Pod monitoring-orc-0.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:29 +0000 UTC Normal StatefulSet.apps monitoring-orc SuccessfulCreate create Pod monitoring-orc-0 in StatefulSet monitoring-orc successful statefulset-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:29 +0000 UTC Normal PodDisruptionBudget.policy monitoring-orchestrator NoPods No matching pods found controllermanager logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:29 +0000 UTC Warning PerconaServerMySQL.ps.percona.com monitoring ClusterStateChanged Error -> Initializing ps-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:32 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-2b1189c1-3880-4ee2-81c2-58d1dfa85a94 pd.csi.storage.gke.io_gke-f1decfb149be4c3d8254-c288-ecb5-vm_7574c2ad-2eca-4c09-8093-b51ec390293e logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:32 +0000 UTC Normal Pod monitoring-mysql-0 Binding Scheduled Successfully assigned kuttl-test-definite-hare/monitoring-mysql-0 to gke-jen-ps-1125-703ecc3e-default-pool-fbc52862-9mtw default-scheduler logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:32 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:32 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 192ms (192ms including waiting). Image size: 72389345 bytes. kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:32 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:32 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:32 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:32 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 183ms (183ms including waiting). Image size: 72389345 bytes. 
kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:32 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:32 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:39 +0000 UTC Normal Pod monitoring-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-2b1189c1-3880-4ee2-81c2-58d1dfa85a94" attachdetach-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:41 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:41 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" in 228ms (228ms including waiting). Image size: 109944390 bytes. kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:41 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:41 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:43 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:43 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 178ms (178ms including waiting). Image size: 433128099 bytes. kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:43 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:43 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:43 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:43 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 193ms (193ms including waiting). Image size: 543257981 bytes. 
kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:43 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:43 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:43 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:43 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 195ms (195ms including waiting). Image size: 133854467 bytes. kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:43 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:43 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:43 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:51 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 7.257s (7.257s including waiting). Image size: 239831884 bytes. kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:51 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Created Created container: pmm-client kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:51 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:05:55 +0000 UTC Normal Service monitoring-haproxy EnsuredLoadBalancer Ensured load balancer service-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:04 +0000 UTC Normal Pod monitoring-orc-1 Binding Scheduled Successfully assigned kuttl-test-definite-hare/monitoring-orc-1 to gke-jen-ps-1125-703ecc3e-default-pool-fbc52862-w261 default-scheduler logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:04 +0000 UTC Normal StatefulSet.apps monitoring-orc SuccessfulCreate create Pod monitoring-orc-1 in StatefulSet monitoring-orc successful statefulset-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:05 +0000 UTC Normal Pod monitoring-orc-1.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:05 +0000 UTC Normal Pod monitoring-orc-1.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" in 132ms (132ms including waiting). Image size: 109944390 bytes. 
kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:05 +0000 UTC Normal Pod monitoring-orc-1.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:05 +0000 UTC Normal Pod monitoring-orc-1.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:07 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:07 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 193ms (193ms including waiting). Image size: 72389345 bytes. kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:07 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:07 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:07 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:07 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 216ms (216ms including waiting). Image size: 72389345 bytes. kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:07 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:07 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:16 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:16 +0000 UTC Normal PodDisruptionBudget.policy monitoring-haproxy NoPods No matching pods found controllermanager logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:16 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Claim datadir-monitoring-mysql-1 Pod monitoring-mysql-1 in StatefulSet monitoring-mysql success statefulset-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:17 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:17 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-definite-hare/datadir-monitoring-mysql-1" pd.csi.storage.gke.io_gke-f1decfb149be4c3d8254-c288-ecb5-vm_7574c2ad-2eca-4c09-8093-b51ec390293e logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:17 +0000 UTC Normal Pod monitoring-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-definite-hare/monitoring-haproxy-0 to gke-jen-ps-1125-703ecc3e-default-pool-fbc52862-9mtw default-scheduler logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:17 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:17 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" in 227ms (227ms including waiting). Image size: 109944390 bytes. kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:17 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:17 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:17 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulCreate create Pod monitoring-haproxy-0 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:17 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Pod monitoring-mysql-1 in StatefulSet monitoring-mysql successful statefulset-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:20 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-a80124b4-087c-49b7-9995-42fb26ca1352 pd.csi.storage.gke.io_gke-f1decfb149be4c3d8254-c288-ecb5-vm_7574c2ad-2eca-4c09-8093-b51ec390293e logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 192ms (192ms including waiting). Image size: 105313584 bytes. 
kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 210ms (210ms including waiting). Image size: 105313584 bytes. kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 149ms (149ms including waiting). Image size: 239831884 bytes. kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Created Created container: pmm-client kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:21 +0000 UTC Normal Pod monitoring-mysql-1 Binding Scheduled Successfully assigned kuttl-test-definite-hare/monitoring-mysql-1 to gke-jen-ps-1125-703ecc3e-default-pool-fbc52862-w261 default-scheduler logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:28 +0000 UTC Normal Pod monitoring-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-a80124b4-087c-49b7-9995-42fb26ca1352" attachdetach-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:32 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:32 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" in 231ms (231ms including waiting). Image size: 109944390 bytes. 
kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:33 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:33 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 108ms (108ms including waiting). Image size: 433128099 bytes. kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 189ms (189ms including waiting). Image size: 543257981 bytes. kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:34 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:35 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 200ms (200ms including waiting). Image size: 133854467 bytes. 
kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:35 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:35 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:35 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:37 +0000 UTC Warning Pod monitoring-haproxy-0.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:40 +0000 UTC Normal Pod monitoring-orc-2 Binding Scheduled Successfully assigned kuttl-test-definite-hare/monitoring-orc-2 to gke-jen-ps-1125-703ecc3e-default-pool-fbc52862-9mtw default-scheduler logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:40 +0000 UTC Normal Pod monitoring-orc-2.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:40 +0000 UTC Normal StatefulSet.apps monitoring-orc SuccessfulCreate create Pod monitoring-orc-2 in StatefulSet monitoring-orc successful statefulset-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:41 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 6.412s (6.412s including waiting). Image size: 239831884 bytes. kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:41 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Created Created container: pmm-client kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:41 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:41 +0000 UTC Normal Pod monitoring-orc-2.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" in 222ms (222ms including waiting). Image size: 109944390 bytes. kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:41 +0000 UTC Normal Pod monitoring-orc-2.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:41 +0000 UTC Normal Pod monitoring-orc-2.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:43 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:43 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 206ms (206ms including waiting). Image size: 72389345 bytes. 
kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:43 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:43 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:43 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:43 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 210ms (210ms including waiting). Image size: 72389345 bytes. kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:43 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:43 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:53 +0000 UTC Warning Pod monitoring-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/10/28 13:06:52 Waiting for MySQL ready state 2025/10/28 13:06:52 MySQL is ready 2025/10/28 13:06:52 Peers: [3630356635616330.monitoring-mysql-unready.kuttl-test-definite-hare 6539636263376662.monitoring-mysql-unready.kuttl-test-definite-hare] 2025/10/28 13:06:52 FQDN: monitoring-mysql-1.monitoring-mysql.kuttl-test-definite-hare 2025/10/28 13:06:52 Primary: monitoring-mysql-0.monitoring-mysql.kuttl-test-definite-hare Replicas: [monitoring-mysql-1.monitoring-mysql.kuttl-test-definite-hare] 2025/10/28 13:06:52 lookup monitoring-mysql-1 [10.240.176.24] 2025/10/28 13:06:52 PodIP: 10.240.176.24 2025/10/28 13:06:52 lookup monitoring-mysql-0.monitoring-mysql.kuttl-test-definite-hare [10.240.177.28] 2025/10/28 13:06:52 PrimaryIP: 10.240.177.28 2025/10/28 13:06:52 Donor: monitoring-mysql-0.monitoring-mysql.kuttl-test-definite-hare 2025/10/28 13:06:52 Opening connection to 10.240.176.24 2025/10/28 13:06:52 Clone required: true 2025/10/28 13:06:52 Checking if a clone in progress 2025/10/28 13:06:52 Clone in progress: false 2025/10/28 13:06:52 Cloning from monitoring-mysql-0.monitoring-mysql.kuttl-test-definite-hare 2025/10/28 13:06:53 Clone finished. Restarting container... kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:53 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:06:57 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 163ms (163ms including waiting). Image size: 433128099 bytes. 
kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:27 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:27 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Claim datadir-monitoring-mysql-2 Pod monitoring-mysql-2 in StatefulSet monitoring-mysql success statefulset-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:28 +0000 UTC Warning Pod monitoring-mysql-2 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/disk-pressure: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:28 +0000 UTC Normal Pod monitoring-mysql-2 NotTriggerScaleUp pod didn't trigger scale-up: cluster-autoscaler logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:28 +0000 UTC Normal StatefulSet.apps monitoring-mysql SuccessfulCreate create Pod monitoring-mysql-2 in StatefulSet monitoring-mysql successful statefulset-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:29 +0000 UTC Warning Pod monitoring-mysql-2 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/disk-pressure: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:30 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-2 WaitForPodScheduled waiting for pod monitoring-mysql-2 to be scheduled persistentvolume-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:52 +0000 UTC Normal Pod monitoring-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-definite-hare/monitoring-haproxy-1 to gke-jen-ps-1125-703ecc3e-default-pool-fbc52862-w261 default-scheduler logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:52 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulCreate create Pod monitoring-haproxy-1 in StatefulSet monitoring-haproxy successful statefulset-controller logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:53 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:53 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" in 221ms (221ms including waiting). Image size: 109944390 bytes. 
kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:53 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:53 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:55 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:55 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 168ms (168ms including waiting). Image size: 105313584 bytes. kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:55 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:55 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:55 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:56 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 210ms (210ms including waiting). Image size: 105313584 bytes. kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:56 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:56 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:56 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:56 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 222ms (222ms including waiting). Image size: 239831884 bytes. kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:56 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Created Created container: pmm-client kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:07:56 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Started Started container pmm-client kubelet logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:08:13 +0000 UTC Warning Pod monitoring-haproxy-2 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/disk-pressure: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. 
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:08:13 +0000 UTC Normal Pod monitoring-haproxy-2 NotTriggerScaleUp pod didn't trigger scale-up: cluster-autoscaler
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:08:13 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulCreate create Pod monitoring-haproxy-2 in StatefulSet monitoring-haproxy successful statefulset-controller
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:08:14 +0000 UTC Warning Pod monitoring-haproxy-2 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/disk-pressure: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:43 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:43 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-definite-hare/datadir-monitoring-mysql-2" pd.csi.storage.gke.io_gke-f1decfb149be4c3d8254-c288-ecb5-vm_7574c2ad-2eca-4c09-8093-b51ec390293e
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:43 +0000 UTC Normal Pod monitoring-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-definite-hare/monitoring-haproxy-2 to gke-jen-ps-1125-703ecc3e-default-pool-fbc52862-vcxq default-scheduler
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:43 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:44 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" in 178ms (178ms including waiting). Image size: 109944390 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:44 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:44 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:45 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:47 +0000 UTC Normal PersistentVolumeClaim datadir-monitoring-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-302f545f-fddf-49b5-8be6-a61f3d791ac5 pd.csi.storage.gke.io_gke-f1decfb149be4c3d8254-c288-ecb5-vm_7574c2ad-2eca-4c09-8093-b51ec390293e
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:47 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 2.327s (2.327s including waiting). Image size: 105313584 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:47 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:47 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:47 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:47 +0000 UTC Normal Pod monitoring-mysql-2 Binding Scheduled Successfully assigned kuttl-test-definite-hare/monitoring-mysql-2 to gke-jen-ps-1125-703ecc3e-default-pool-fbc52862-vcxq default-scheduler
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:48 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 193ms (193ms including waiting). Image size: 105313584 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:48 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:48 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:48 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:54 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 5.96s (5.96s including waiting). Image size: 239831884 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:54 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Created Created container: pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:54 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Started Started container pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:55 +0000 UTC Normal Pod monitoring-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-302f545f-fddf-49b5-8be6-a61f3d791ac5" attachdetach-controller
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:56 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:56 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" in 188ms (188ms including waiting). Image size: 109944390 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:56 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:10:56 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:11:03 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:11:03 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 181ms (181ms including waiting). Image size: 433128099 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:11:03 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:11:03 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:11:03 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:11:21 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 18.07s (18.07s including waiting). Image size: 543257981 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:11:22 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:11:22 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:11:22 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:11:25 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 3.448s (3.448s including waiting). Image size: 133854467 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:11:25 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:11:26 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:11:26 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:11:26 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 763ms (763ms including waiting). Image size: 239831884 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:11:27 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Created Created container: pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:11:27 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Started Started container pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:11:36 +0000 UTC Warning Pod monitoring-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/10/28 13:11:36 Waiting for MySQL ready state 2025/10/28 13:11:36 MySQL is ready 2025/10/28 13:11:36 Peers: [3630356635616330.monitoring-mysql-unready.kuttl-test-definite-hare 3833323037336363.monitoring-mysql-unready.kuttl-test-definite-hare 6539636263376662.monitoring-mysql-unready.kuttl-test-definite-hare] 2025/10/28 13:11:36 FQDN: monitoring-mysql-2.monitoring-mysql.kuttl-test-definite-hare 2025/10/28 13:11:36 Primary: monitoring-mysql-0.monitoring-mysql.kuttl-test-definite-hare Replicas: [monitoring-mysql-1.monitoring-mysql.kuttl-test-definite-hare monitoring-mysql-2.monitoring-mysql.kuttl-test-definite-hare] 2025/10/28 13:11:36 lookup monitoring-mysql-2 [10.240.178.39] 2025/10/28 13:11:36 PodIP: 10.240.178.39 2025/10/28 13:11:36 lookup monitoring-mysql-0.monitoring-mysql.kuttl-test-definite-hare [10.240.177.28] 2025/10/28 13:11:36 PrimaryIP: 10.240.177.28 2025/10/28 13:11:36 Donor: monitoring-mysql-1.monitoring-mysql.kuttl-test-definite-hare 2025/10/28 13:11:36 Opening connection to 10.240.178.39 2025/10/28 13:11:36 Clone required: true 2025/10/28 13:11:36 Checking if a clone in progress 2025/10/28 13:11:36 Clone in progress: false 2025/10/28 13:11:36 Cloning from monitoring-mysql-1.monitoring-mysql.kuttl-test-definite-hare 2025/10/28 13:11:36 Clone finished. Restarting container... kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:11:36 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:11:42 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 193ms (193ms including waiting). Image size: 433128099 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:18 +0000 UTC Warning PerconaServerMySQL.ps.percona.com monitoring ClusterStateChanged Initializing -> Ready ps-controller
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:28 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:28 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:28 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:28 +0000 UTC Warning Pod monitoring-haproxy-2.spec.containers{haproxy} Unhealthy Readiness probe errored: rpc error: code = Unknown desc = failed to exec in container: container is in CONTAINER_EXITED state kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:28 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulDelete delete Pod monitoring-haproxy-2 in StatefulSet monitoring-haproxy successful statefulset-controller
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:30 +0000 UTC Normal Pod monitoring-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-definite-hare/monitoring-haproxy-2 to gke-jen-ps-1125-703ecc3e-default-pool-fbc52862-vcxq default-scheduler
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:31 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:31 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" in 191ms (191ms including waiting). Image size: 109944390 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:31 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:31 +0000 UTC Normal Pod monitoring-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:33 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:34 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 223ms (223ms including waiting). Image size: 105313584 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:34 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:34 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:34 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:34 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 162ms (162ms including waiting). Image size: 105313584 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:34 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:34 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:34 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:34 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 125ms (125ms including waiting). Image size: 239831884 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:34 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Created Created container: pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:34 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Started Started container pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:36 +0000 UTC Normal Pod monitoring-mysql-2 Binding Scheduled Successfully assigned kuttl-test-definite-hare/monitoring-mysql-2 to gke-jen-ps-1125-703ecc3e-default-pool-fbc52862-vcxq default-scheduler
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:37 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:37 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" in 167ms (167ms including waiting). Image size: 109944390 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:37 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:37 +0000 UTC Normal Pod monitoring-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:39 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:39 +0000 UTC Warning PerconaServerMySQL.ps.percona.com monitoring ClusterStateChanged Ready -> Initializing ps-controller
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:40 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 218ms (218ms including waiting). Image size: 433128099 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:40 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:40 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:40 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:40 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 140ms (140ms including waiting). Image size: 543257981 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:40 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:40 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:40 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:40 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 194ms (194ms including waiting). Image size: 133854467 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:40 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:40 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:40 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:40 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 265ms (265ms including waiting). Image size: 239831884 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:40 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Created Created container: pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:40 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{pmm-client} Started Started container pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:54 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:54 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:54 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:54 +0000 UTC Warning Pod monitoring-haproxy-1.spec.containers{pmm-client} FailedPreStopHook PreStopHook failed kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:54 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulDelete delete Pod monitoring-haproxy-1 in StatefulSet monitoring-haproxy successful statefulset-controller
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:55 +0000 UTC Normal Pod monitoring-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-definite-hare/monitoring-haproxy-1 to gke-jen-ps-1125-703ecc3e-default-pool-fbc52862-w261 default-scheduler
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:55 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:56 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" in 226ms (226ms including waiting). Image size: 109944390 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:56 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:56 +0000 UTC Normal Pod monitoring-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:57 +0000 UTC Warning Pod monitoring-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/10/28 13:12:57 Waiting for MySQL ready state 2025/10/28 13:12:57 MySQL is ready 2025/10/28 13:12:57 Peers: [3630356635616330.monitoring-mysql-unready.kuttl-test-definite-hare 3933393131646364.monitoring-mysql-unready.kuttl-test-definite-hare 6539636263376662.monitoring-mysql-unready.kuttl-test-definite-hare] 2025/10/28 13:12:57 FQDN: monitoring-mysql-2.monitoring-mysql.kuttl-test-definite-hare 2025/10/28 13:12:57 Primary: monitoring-mysql-0.monitoring-mysql.kuttl-test-definite-hare Replicas: [monitoring-mysql-1.monitoring-mysql.kuttl-test-definite-hare monitoring-mysql-2.monitoring-mysql.kuttl-test-definite-hare] 2025/10/28 13:12:57 lookup monitoring-mysql-2 [10.240.178.41] 2025/10/28 13:12:57 PodIP: 10.240.178.41 2025/10/28 13:12:57 lookup monitoring-mysql-0.monitoring-mysql.kuttl-test-definite-hare [10.240.177.28] 2025/10/28 13:12:57 PrimaryIP: 10.240.177.28 2025/10/28 13:12:57 Donor: monitoring-mysql-1.monitoring-mysql.kuttl-test-definite-hare 2025/10/28 13:12:57 Opening connection to 10.240.178.41 2025/10/28 13:12:57 Clone required: true 2025/10/28 13:12:57 Checking if a clone in progress 2025/10/28 13:12:57 Clone in progress: false 2025/10/28 13:12:57 Cloning from monitoring-mysql-1.monitoring-mysql.kuttl-test-definite-hare 2025/10/28 13:12:57 Clone finished. Restarting container... kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:57 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:58 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:58 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 96ms (96ms including waiting). Image size: 105313584 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:58 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:58 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:58 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:58 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 106ms (106ms including waiting). Image size: 105313584 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:58 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:58 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:58 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:58 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 215ms (215ms including waiting). Image size: 239831884 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:58 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Created Created container: pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:12:58 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Started Started container pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:01 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 197ms (197ms including waiting). Image size: 433128099 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:16 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:16 +0000 UTC Normal StatefulSet.apps monitoring-haproxy SuccessfulDelete delete Pod monitoring-haproxy-0 in StatefulSet monitoring-haproxy successful statefulset-controller
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:17 +0000 UTC Normal Pod monitoring-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-definite-hare/monitoring-haproxy-0 to gke-jen-ps-1125-703ecc3e-default-pool-fbc52862-9mtw default-scheduler
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:17 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:18 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" in 151ms (151ms including waiting). Image size: 109944390 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:18 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:18 +0000 UTC Normal Pod monitoring-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:19 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:19 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 170ms (171ms including waiting). Image size: 105313584 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:19 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 174ms (174ms including waiting). Image size: 105313584 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 164ms (164ms including waiting). Image size: 239831884 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Created Created container: pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:20 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Started Started container pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:40 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:44 +0000 UTC Normal Pod monitoring-mysql-1 Binding Scheduled Successfully assigned kuttl-test-definite-hare/monitoring-mysql-1 to gke-jen-ps-1125-703ecc3e-default-pool-fbc52862-w261 default-scheduler
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:46 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:46 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" in 171ms (171ms including waiting). Image size: 109944390 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:46 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:46 +0000 UTC Normal Pod monitoring-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:48 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:48 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 89ms (89ms including waiting). Image size: 433128099 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:48 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:48 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:48 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:48 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 181ms (181ms including waiting). Image size: 543257981 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:48 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:48 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:48 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:48 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 126ms (126ms including waiting). Image size: 133854467 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:48 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:48 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:48 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:49 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 236ms (236ms including waiting). Image size: 239831884 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:49 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Created Created container: pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:13:49 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{pmm-client} Started Started container pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:14:07 +0000 UTC Warning Pod monitoring-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/10/28 13:14:06 Waiting for MySQL ready state 2025/10/28 13:14:06 MySQL is ready 2025/10/28 13:14:06 Peers: [3933393131646364.monitoring-mysql-unready.kuttl-test-definite-hare 6164343836666362.monitoring-mysql-unready.kuttl-test-definite-hare 6539636263376662.monitoring-mysql-unready.kuttl-test-definite-hare] 2025/10/28 13:14:06 FQDN: monitoring-mysql-1.monitoring-mysql.kuttl-test-definite-hare 2025/10/28 13:14:06 Primary: monitoring-mysql-0.monitoring-mysql.kuttl-test-definite-hare Replicas: [monitoring-mysql-1.monitoring-mysql.kuttl-test-definite-hare monitoring-mysql-2.monitoring-mysql.kuttl-test-definite-hare] 2025/10/28 13:14:06 lookup monitoring-mysql-1 [10.240.176.27] 2025/10/28 13:14:06 PodIP: 10.240.176.27 2025/10/28 13:14:06 lookup monitoring-mysql-0.monitoring-mysql.kuttl-test-definite-hare [10.240.177.28] 2025/10/28 13:14:06 PrimaryIP: 10.240.177.28 2025/10/28 13:14:06 Donor: monitoring-mysql-2.monitoring-mysql.kuttl-test-definite-hare 2025/10/28 13:14:06 Opening connection to 10.240.176.27 2025/10/28 13:14:06 Clone required: true 2025/10/28 13:14:06 Checking if a clone in progress 2025/10/28 13:14:06 Clone in progress: false 2025/10/28 13:14:06 Cloning from monitoring-mysql-2.monitoring-mysql.kuttl-test-definite-hare 2025/10/28 13:14:07 Clone finished. Restarting container... kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:14:07 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:14:10 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 182ms (182ms including waiting). Image size: 433128099 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:14:42 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:14:42 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:14:42 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:14:42 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:14:42 +0000 UTC Warning Pod monitoring-mysql-0.spec.containers{pmm-client} FailedPreStopHook PreStopHook failed kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:14:46 +0000 UTC Warning Pod monitoring-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/10/28 13:14:46 MySQL state is not ready... kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:14:50 +0000 UTC Warning Pod monitoring-haproxy-0.spec.containers{haproxy} Unhealthy Liveness probe failed: kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:14:51 +0000 UTC Warning Pod monitoring-haproxy-0.spec.containers{haproxy} Unhealthy Readiness probe errored: command timed out: "/opt/percona/haproxy_readiness_check.sh" timed out after 1s kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:14:54 +0000 UTC Warning Pod monitoring-haproxy-0.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:14:54 +0000 UTC Warning Pod monitoring-haproxy-1.spec.containers{haproxy} Unhealthy Readiness probe errored: command timed out: "/opt/percona/haproxy_readiness_check.sh" timed out after 1s kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:14:55 +0000 UTC Warning Pod monitoring-haproxy-1.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:14:55 +0000 UTC Warning Pod monitoring-haproxy-2.spec.containers{haproxy} Unhealthy Readiness probe errored: command timed out: "/opt/percona/haproxy_readiness_check.sh" timed out after 1s kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:14:55 +0000 UTC Warning Pod monitoring-haproxy-2.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:03 +0000 UTC Normal Pod monitoring-mysql-0 Binding Scheduled Successfully assigned kuttl-test-definite-hare/monitoring-mysql-0 to gke-jen-ps-1125-703ecc3e-default-pool-fbc52862-9mtw default-scheduler
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:12 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:12 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-1125-703ecc3e" in 138ms (138ms including waiting). Image size: 109944390 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:12 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:12 +0000 UTC Normal Pod monitoring-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:13 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.4" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 129ms (129ms including waiting). Image size: 433128099 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.4" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.4" in 164ms (164ms including waiting). Image size: 543257981 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 193ms (194ms including waiting). Image size: 133854467 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Pulling Pulling image "perconalab/pmm-client:3-dev-latest" kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Pulled Successfully pulled image "perconalab/pmm-client:3-dev-latest" in 239ms (239ms including waiting). Image size: 239831884 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:14 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Created Created container: pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:15 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{pmm-client} Started Started container pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:32 +0000 UTC Warning Pod monitoring-mysql-0.spec.containers{mysql} Unhealthy Startup probe failed: 2025/10/28 13:15:32 Waiting for MySQL ready state 2025/10/28 13:15:32 MySQL is ready 2025/10/28 13:15:32 Peers: [3933393131646364.monitoring-mysql-unready.kuttl-test-definite-hare 6164343836666362.monitoring-mysql-unready.kuttl-test-definite-hare 6333303937623361.monitoring-mysql-unready.kuttl-test-definite-hare] 2025/10/28 13:15:32 FQDN: monitoring-mysql-0.monitoring-mysql.kuttl-test-definite-hare 2025/10/28 13:15:32 Primary: monitoring-mysql-2.monitoring-mysql.kuttl-test-definite-hare Replicas: [monitoring-mysql-0.monitoring-mysql.kuttl-test-definite-hare monitoring-mysql-1.monitoring-mysql.kuttl-test-definite-hare] 2025/10/28 13:15:32 lookup monitoring-mysql-0 [10.240.177.32] 2025/10/28 13:15:32 PodIP: 10.240.177.32 2025/10/28 13:15:32 lookup monitoring-mysql-2.monitoring-mysql.kuttl-test-definite-hare [10.240.178.41] 2025/10/28 13:15:32 PrimaryIP: 10.240.178.41 2025/10/28 13:15:32 Donor: monitoring-mysql-1.monitoring-mysql.kuttl-test-definite-hare 2025/10/28 13:15:32 Opening connection to 10.240.177.32 2025/10/28 13:15:32 Clone required: true 2025/10/28 13:15:32 Checking if a clone in progress 2025/10/28 13:15:32 Clone in progress: false 2025/10/28 13:15:32 Cloning from monitoring-mysql-1.monitoring-mysql.kuttl-test-definite-hare 2025/10/28 13:15:32 Clone finished. Restarting container... kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:32 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:15:36 +0000 UTC Normal Pod monitoring-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.4" in 104ms (104ms including waiting). Image size: 433128099 bytes. kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:19:06 +0000 UTC Normal Service monitoring-haproxy DeletingLoadBalancer Deleting load balancer service-controller
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:19:07 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:19:07 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:19:07 +0000 UTC Normal Pod monitoring-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:19:07 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:19:07 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:19:07 +0000 UTC Normal Pod monitoring-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:19:07 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:19:07 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{pmm-client} Killing Stopping container pmm-client kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:19:07 +0000 UTC Normal Pod monitoring-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:19:07 +0000 UTC Normal Pod monitoring-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:19:07 +0000 UTC Normal Pod monitoring-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:19:07 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:19:07 +0000 UTC Normal Pod monitoring-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:19:07 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:19:07 +0000 UTC Normal Pod monitoring-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:19:07 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 13:19:20 | monitoring | 2025-10-28 13:19:07 +0000 UTC Normal Pod monitoring-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 13:19:20 | monitoring | Deleting namespace: kuttl-test-definite-hare
=== NAME  kuttl
    harness.go:403: run tests finished
    harness.go:510: cleaning up
    harness.go:567: removing temp folder: ""
--- PASS: kuttl (1042.95s)
    --- PASS: kuttl/harness (0.00s)
        --- PASS: kuttl/harness/monitoring (1042.23s)
PASS