=== RUN   kuttl
    harness.go:459: starting setup
    harness.go:254: running tests using configured kubeconfig.
    harness.go:277: Successful connection to cluster at: https://34.55.44.16
    harness.go:362: running tests
    harness.go:74: going to run test suite with timeout of 180 seconds for each step
    harness.go:374: testsuite: e2e-tests/tests has 38 tests
=== RUN   kuttl/harness
=== RUN   kuttl/harness/haproxy
=== PAUSE kuttl/harness/haproxy
=== CONT  kuttl/harness/haproxy
    logger.go:42: 13:50:59 | haproxy | Creating namespace: kuttl-test-loving-racer
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | starting test step 0-deploy-operator
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | running command: [sh -c set -o errexit
        set -o xtrace

        source ../../functions

        init_temp_dir # do this only in the first TestStep

        deploy_operator
        deploy_non_tls_cluster_secrets
        deploy_tls_cluster_secrets
        deploy_client]
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | + source ../../functions
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ realpath ../../..
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-975
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | ++++ pwd
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/tests/haproxy
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | ++ test_name=haproxy
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/vars.sh
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-975
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-975
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/deploy
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/deploy
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/conf
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/conf
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/haproxy
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/haproxy
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ export GIT_BRANCH=PR-975
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ GIT_BRANCH=PR-975
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ export VERSION=PR-975-d7710be1
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ VERSION=PR-975-d7710be1
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-975-d7710be1
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-975-d7710be1
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ export PMM_SERVER_VERSION=1.4.3
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ PMM_SERVER_VERSION=1.4.3
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ export CERT_MANAGER_VER=1.17.2
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ CERT_MANAGER_VER=1.17.2
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ export MINIO_VER=5.4.0
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ MINIO_VER=5.4.0
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ export CHAOS_MESH_VER=2.7.2
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ CHAOS_MESH_VER=2.7.2
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ export VAULT_VER=0.16.1
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ VAULT_VER=0.16.1
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | ++++ which gdate
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-975/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | ++++ which date
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ date=/usr/sbin/date
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ oc get projects
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ :
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ kubectl get nodes
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | +++ grep '^minikube'
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | ++ oc get projects
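The `which gdate` / `which date` probes that close the environment dump are a portability shim: the helpers prefer GNU date (installed as gdate on macOS/BSD hosts with coreutils) and fall back to the system date on Linux, which is why the trace records `+++ date=/usr/sbin/date`. A minimal sketch of that pattern; the names come from the trace, but suppressing the `which` error is my addition:

    # Prefer GNU date (gdate) when present, otherwise use the system date.
    date=$(which gdate 2>/dev/null) || date=$(which date)
    "$date" -u +%FT%TZ    # GNU-style flags are now safe for the helpers to use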
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | + init_temp_dir
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | + rm -rf /tmp/kuttl/ps/haproxy
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | + mkdir -p /tmp/kuttl/ps/haproxy
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | + deploy_operator
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | + destroy_operator
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
    logger.go:42: 13:51:00 | haproxy/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
    logger.go:42: 13:51:01 | haproxy/0-deploy-operator | Error from server (NotFound): deployments.apps "percona-server-mysql-operator" not found
    logger.go:42: 13:51:01 | haproxy/0-deploy-operator | + true
    logger.go:42: 13:51:01 | haproxy/0-deploy-operator | + [[ -n ps-operator ]]
    logger.go:42: 13:51:01 | haproxy/0-deploy-operator | + kubectl delete namespace ps-operator --force --grace-period=0
    logger.go:42: 13:51:01 | haproxy/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
    logger.go:42: 13:51:01 | haproxy/0-deploy-operator | Error from server (NotFound): namespaces "ps-operator" not found
    logger.go:42: 13:51:01 | haproxy/0-deploy-operator | + true
    logger.go:42: 13:51:01 | haproxy/0-deploy-operator | + [[ -n ps-operator ]]
    logger.go:42: 13:51:01 | haproxy/0-deploy-operator | + create_namespace ps-operator
    logger.go:42: 13:51:01 | haproxy/0-deploy-operator | + local namespace=ps-operator
    logger.go:42: 13:51:01 | haproxy/0-deploy-operator | + [[ -n '' ]]
    logger.go:42: 13:51:01 | haproxy/0-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found
    logger.go:42: 13:51:02 | haproxy/0-deploy-operator | + kubectl wait --for=delete namespace ps-operator
    logger.go:42: 13:51:02 | haproxy/0-deploy-operator | + kubectl create namespace ps-operator
    logger.go:42: 13:51:03 | haproxy/0-deploy-operator | namespace/ps-operator created
    logger.go:42: 13:51:03 | haproxy/0-deploy-operator | + kubectl -n ps-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-975/deploy/crd.yaml
    logger.go:42: 13:51:04 | haproxy/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
    logger.go:42: 13:51:04 | haproxy/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
    logger.go:42: 13:51:05 | haproxy/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
    logger.go:42: 13:51:05 | haproxy/0-deploy-operator | + '[' -n ps-operator ']'
    logger.go:42: 13:51:05 | haproxy/0-deploy-operator | + kubectl -n ps-operator apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-975/deploy/cw-rbac.yaml
    logger.go:42: 13:51:06 | haproxy/0-deploy-operator | serviceaccount/percona-server-mysql-operator created
    logger.go:42: 13:51:07 | haproxy/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
    logger.go:42: 13:51:07 | haproxy/0-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
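Condensed from the trace above: deploy_operator first force-deletes any leftovers from a previous run, tolerating NotFound errors (the bare `+ true` lines), then recreates the namespace and installs the CRDs and cluster-wide RBAC. A sketch of that sequence under those assumptions, using the names from the log:

    # Idempotent teardown: both deletes may hit NotFound on a clean cluster.
    kubectl -n ps-operator delete deployment percona-server-mysql-operator \
        --force --grace-period=0 || true
    kubectl delete namespace ps-operator --force --grace-period=0 || true

    # Recreate the operator namespace from scratch.
    kubectl delete namespace ps-operator --ignore-not-found
    kubectl wait --for=delete namespace ps-operator
    kubectl create namespace ps-operator

    # Server-side apply avoids the client-side last-applied-configuration
    # annotation, which large CRDs like these can overflow; --force-conflicts
    # takes ownership of fields applied by an earlier installer.
    kubectl -n ps-operator apply --server-side --force-conflicts -f deploy/crd.yaml
    kubectl -n ps-operator apply -f deploy/cw-rbac.yaml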
    logger.go:42: 13:51:07 | haproxy/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
    logger.go:42: 13:51:07 | haproxy/0-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
    logger.go:42: 13:51:07 | haproxy/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"'
    logger.go:42: 13:51:07 | haproxy/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"'
    logger.go:42: 13:51:07 | haproxy/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-975-d7710be1
    logger.go:42: 13:51:07 | haproxy/0-deploy-operator | + kubectl -n ps-operator apply -f -
    logger.go:42: 13:51:07 | haproxy/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-975-d7710be1"' /mnt/jenkins/workspace/cloud-ps-operator_PR-975/deploy/cw-operator.yaml
    logger.go:42: 13:51:08 | haproxy/0-deploy-operator | configmap/percona-server-mysql-operator-config created
    logger.go:42: 13:51:09 | haproxy/0-deploy-operator | deployment.apps/percona-server-mysql-operator created
    logger.go:42: 13:51:09 | haproxy/0-deploy-operator | + deploy_non_tls_cluster_secrets
    logger.go:42: 13:51:09 | haproxy/0-deploy-operator | + kubectl -n kuttl-test-loving-racer apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/conf/secrets.yaml
    logger.go:42: 13:51:10 | haproxy/0-deploy-operator | secret/test-secrets created
    logger.go:42: 13:51:10 | haproxy/0-deploy-operator | + deploy_tls_cluster_secrets
    logger.go:42: 13:51:10 | haproxy/0-deploy-operator | + kubectl -n kuttl-test-loving-racer apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/conf/ssl-secret.yaml
    logger.go:42: 13:51:11 | haproxy/0-deploy-operator | secret/test-ssl created
    logger.go:42: 13:51:11 | haproxy/0-deploy-operator | + deploy_client
    logger.go:42: 13:51:11 | haproxy/0-deploy-operator | + kubectl -n kuttl-test-loving-racer apply -f -
    logger.go:42: 13:51:11 | haproxy/0-deploy-operator | ++ printf '.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:main-psmysql8.0
    logger.go:42: 13:51:11 | haproxy/0-deploy-operator | + yq eval '.spec.containers[0].image="perconalab/percona-server-mysql-operator:main-psmysql8.0"' /mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/conf/client.yaml
    logger.go:42: 13:51:12 | haproxy/0-deploy-operator | pod/mysql-client created
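cw-operator.yaml is a multi-document manifest, so every override selects `documentIndex==1` (the second YAML document, the Deployment) before rewriting the manager image and env vars. The xtrace interleaves the pipeline's processes; one plausible reassembly of the fragments above:

    yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-975-d7710be1"' \
        deploy/cw-operator.yaml \
      | yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' - \
      | yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"' - \
      | kubectl -n ps-operator apply -f -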
    logger.go:42: 13:51:13 | haproxy/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 13:51:13 | haproxy/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 13:51:13 | haproxy/0-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 13:51:15 | haproxy/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 13:51:15 | haproxy/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 13:51:15 | haproxy/0-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 13:51:16 | haproxy/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 13:51:17 | haproxy/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 13:51:17 | haproxy/0-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 13:51:18 | haproxy/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 13:51:18 | haproxy/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 13:51:19 | haproxy/0-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 13:51:20 | haproxy/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 13:51:20 | haproxy/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 13:51:21 | haproxy/0-deploy-operator | INFO Found 1 resource(s).
    logger.go:42: 13:51:21 | haproxy/0-deploy-operator | NAME                            NAMESPACE     COL0
    logger.go:42: 13:51:21 | haproxy/0-deploy-operator | percona-server-mysql-operator   ps-operator   1
    logger.go:42: 13:51:21 | haproxy/0-deploy-operator | ASSERT PASS
    logger.go:42: 13:51:21 | haproxy/0-deploy-operator | test step completed 0-deploy-operator
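The alternating running command / ASSERT FAIL lines above are kuttl re-running the step's TestAssert command (kubectl assert is the kubectl-assert krew plugin, visible in the PATH dump earlier) until the operator Deployment reports a ready replica or the 180-second step timeout expires. A standalone equivalent of that wait, with the retry loop supplied by hand instead of by kuttl:

    # Poll until the operator Deployment has one ready replica.
    until kubectl assert exist-enhanced deployment percona-server-mysql-operator \
            -n "${OPERATOR_NS:-$NAMESPACE}" \
            --field-selector status.readyReplicas=1 >/dev/null 2>&1; do
        sleep 2    # the trace shows retries roughly every 2s, 13:51:13-13:51:21
    done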
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | starting test step 1-create-cluster
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | running command: [sh -c set -o errexit
        set -o xtrace

        source ../../functions

        get_cr \
            | yq eval '.spec.updateStrategy="RollingUpdate"' - \
            | yq eval '.spec.mysql.clusterType="async"' - \
            | yq eval '.spec.orchestrator.enabled=true' - \
            | yq eval '.spec.proxy.haproxy.enabled=true' - \
            | yq eval '.spec.proxy.haproxy.size=3' - \
            | yq eval '.spec.proxy.haproxy.livenessProbe.timeoutSeconds=30' - \
            | yq eval '.spec.proxy.haproxy.livenessProbe.periodSeconds=10' - \
            | yq eval '.spec.proxy.haproxy.readinessProbe.failureThreshold=40' - \
            | yq eval '.spec.proxy.haproxy.readinessProbe.successThreshold=10' - \
            | yq eval '.spec.proxy.haproxy.size=3' - \
            | kubectl -n "${NAMESPACE}" apply -f -]
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + source ../../functions
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | [vars.sh environment dump omitted: identical to step 0 above (ROOT_REPO through VAULT_VER exports, gdate/date detection, oc/minikube probes)]
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + get_cr
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval '.spec.updateStrategy="RollingUpdate"' -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + local name_suffix=
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval .spec.orchestrator.enabled=true -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval .spec.proxy.haproxy.enabled=true -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval .spec.proxy.haproxy.size=3 -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval .spec.proxy.haproxy.livenessProbe.timeoutSeconds=30 -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | ++ printf '.metadata.name="%s"' haproxy
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval '.spec.secretsName="test-secrets"' -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval '.metadata.name="haproxy"' /mnt/jenkins/workspace/cloud-ps-operator_PR-975/deploy/cr.yaml
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-975-d7710be1
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval .spec.proxy.haproxy.livenessProbe.periodSeconds=10 -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval .spec.mysql.gracePeriod=30 -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval .spec.proxy.haproxy.readinessProbe.failureThreshold=40 -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval .spec.orchestrator.enabled=true -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + kubectl -n kuttl-test-loving-racer apply -f -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval .spec.proxy.haproxy.size=3 -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval .spec.proxy.haproxy.readinessProbe.successThreshold=10 -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql8.0
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + '[' -n '' ']'
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router8.0
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:3-dev-latest
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-975-d7710be1"' -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql8.0"' -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup8.0
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:3-dev-latest"' -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup8.0"' -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router8.0"' -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
    logger.go:42: 13:51:21 | haproxy/1-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
    logger.go:42: 13:51:23 | haproxy/1-create-cluster | perconaservermysql.ps.percona.com/haproxy created
    logger.go:42: 13:55:42 | haproxy/1-create-cluster | test step completed 1-create-cluster
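xtrace prints every process in the step-1 pipeline as it starts, which is why the yq lines above appear out of order. Logically, get_cr renders deploy/cr.yaml with the name, secret names, and per-component images filled in from vars.sh, and the step then layers the async/HAProxy overrides on top. The same result as a single yq expression; a condensed sketch, not the suite's actual code (note the original step applies .spec.proxy.haproxy.size=3 twice, the second time as a no-op):

    get_cr \
      | yq eval '
          .spec.updateStrategy = "RollingUpdate" |
          .spec.mysql.clusterType = "async" |
          .spec.orchestrator.enabled = true |
          .spec.proxy.haproxy.enabled = true |
          .spec.proxy.haproxy.size = 3 |
          .spec.proxy.haproxy.livenessProbe.timeoutSeconds = 30 |
          .spec.proxy.haproxy.livenessProbe.periodSeconds = 10 |
          .spec.proxy.haproxy.readinessProbe.failureThreshold = 40 |
          .spec.proxy.haproxy.readinessProbe.successThreshold = 10
        ' - \
      | kubectl -n "${NAMESPACE}" apply -f -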
    logger.go:42: 13:55:42 | haproxy/2-write-data | starting test step 2-write-data
    logger.go:42: 13:55:42 | haproxy/2-write-data | running command: [sh -c set -o errexit
        set -o xtrace

        source ../../functions

        run_mysql \
            "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" \
            "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password"

        run_mysql \
            "INSERT myDB.myTable (id) VALUES (100500)" \
            "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password"]
    logger.go:42: 13:55:42 | haproxy/2-write-data | + source ../../functions
    logger.go:42: 13:55:42 | haproxy/2-write-data | [vars.sh environment dump omitted: identical to step 0 above (ROOT_REPO through VAULT_VER exports, gdate/date detection, oc/minikube probes)]
    logger.go:42: 13:55:43 | haproxy/2-write-data | +++ get_cluster_name
    logger.go:42: 13:55:43 | haproxy/2-write-data | +++ kubectl -n kuttl-test-loving-racer get ps -o 'jsonpath={.items[0].metadata.name}'
    logger.go:42: 13:55:43 | haproxy/2-write-data | ++ get_haproxy_svc haproxy
    logger.go:42: 13:55:43 | haproxy/2-write-data | ++ local cluster=haproxy
    logger.go:42: 13:55:43 | haproxy/2-write-data | ++ echo haproxy-haproxy
    logger.go:42: 13:55:43 | haproxy/2-write-data | + run_mysql 'CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' '-h haproxy-haproxy -uroot -proot_password'
    logger.go:42: 13:55:43 | haproxy/2-write-data | + local 'command=CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)'
    logger.go:42: 13:55:43 | haproxy/2-write-data | + local 'uri=-h haproxy-haproxy -uroot -proot_password'
    logger.go:42: 13:55:43 | haproxy/2-write-data | + local pod=
    logger.go:42: 13:55:43 | haproxy/2-write-data | ++ get_client_pod
    logger.go:42: 13:55:43 | haproxy/2-write-data | ++ kubectl -n kuttl-test-loving-racer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
    logger.go:42: 13:55:44 | haproxy/2-write-data | + client_pod=mysql-client
    logger.go:42: 13:55:44 | haproxy/2-write-data | + wait_pod mysql-client
    logger.go:42: 13:55:44 | haproxy/2-write-data | + local pod=mysql-client
    logger.go:42: 13:55:44 | haproxy/2-write-data | + set +o xtrace
    logger.go:42: 13:55:44 | haproxy/2-write-data | mysql-clienttrue
    logger.go:42: 13:55:44 | haproxy/2-write-data | + kubectl -n kuttl-test-loving-racer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" | mysql -sN -h haproxy-haproxy -uroot -proot_password'
    logger.go:42: 13:55:44 | haproxy/2-write-data | + sed -e 's/mysql: //'
    logger.go:42: 13:55:44 | haproxy/2-write-data | + grep -v 'Using a password on the command line interface can be insecure.'
    logger.go:42: 13:55:45 | haproxy/2-write-data | + :
    logger.go:42: 13:55:45 | haproxy/2-write-data | +++ get_cluster_name
    logger.go:42: 13:55:45 | haproxy/2-write-data | +++ kubectl -n kuttl-test-loving-racer get ps -o 'jsonpath={.items[0].metadata.name}'
    logger.go:42: 13:55:46 | haproxy/2-write-data | ++ get_haproxy_svc haproxy
    logger.go:42: 13:55:46 | haproxy/2-write-data | ++ local cluster=haproxy
    logger.go:42: 13:55:46 | haproxy/2-write-data | ++ echo haproxy-haproxy
    logger.go:42: 13:55:46 | haproxy/2-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100500)' '-h haproxy-haproxy -uroot -proot_password'
    logger.go:42: 13:55:46 | haproxy/2-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100500)'
    logger.go:42: 13:55:46 | haproxy/2-write-data | + local 'uri=-h haproxy-haproxy -uroot -proot_password'
    logger.go:42: 13:55:46 | haproxy/2-write-data | + local pod=
    logger.go:42: 13:55:46 | haproxy/2-write-data | ++ get_client_pod
    logger.go:42: 13:55:46 | haproxy/2-write-data | ++ kubectl -n kuttl-test-loving-racer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
    logger.go:42: 13:55:46 | haproxy/2-write-data | + client_pod=mysql-client
    logger.go:42: 13:55:46 | haproxy/2-write-data | + wait_pod mysql-client
    logger.go:42: 13:55:46 | haproxy/2-write-data | + local pod=mysql-client
    logger.go:42: 13:55:46 | haproxy/2-write-data | + set +o xtrace
    logger.go:42: 13:55:47 | haproxy/2-write-data | mysql-clienttrue
    logger.go:42: 13:55:47 | haproxy/2-write-data | + kubectl -n kuttl-test-loving-racer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100500)" | mysql -sN -h haproxy-haproxy -uroot -proot_password'
    logger.go:42: 13:55:47 | haproxy/2-write-data | + sed -e 's/mysql: //'
    logger.go:42: 13:55:47 | haproxy/2-write-data | + grep -v 'Using a password on the command line interface can be insecure.'
    logger.go:42: 13:55:48 | haproxy/2-write-data | + :
    logger.go:42: 13:55:48 | haproxy/2-write-data | test step completed 2-write-data
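Both writes went through the run_mysql helper. From the xtrace it resolves the long-lived mysql-client pod, waits for it (the compressed `mysql-clienttrue` line is wait_pod's output with xtrace turned off), then pipes the statement into a mysql client inside that pod, filtering the password warning out of the result. A simplified reconstruction; the real helper also accepts an explicit pod and performs the readiness wait:

    run_mysql() {
        local command="$1"
        local uri="$2"    # e.g. "-h haproxy-haproxy -uroot -proot_password"
        local client_pod
        client_pod=$(kubectl -n "${NAMESPACE}" get pods \
            --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}')
        kubectl -n "${NAMESPACE}" exec "$client_pod" -- \
            bash -c "printf '%s\n' \"$command\" | mysql -sN $uri" \
          | sed -e 's/mysql: //' \
          | grep -v 'Using a password on the command line interface can be insecure.' \
          || :    # writes return no rows, leaving grep nothing to match (the bare '+ :' above)
    }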
    logger.go:42: 13:55:48 | haproxy/3-read-from-primary | starting test step 3-read-from-primary
    logger.go:42: 13:55:48 | haproxy/3-read-from-primary | running command: [sh -c set -o errexit
        set -o xtrace

        source ../../functions

        data=$(run_mysql "SELECT * FROM myDB.myTable" "-h $(get_haproxy_svc $(get_cluster_name)) -P3306 -uroot -proot_password")
        kubectl create configmap -n "${NAMESPACE}" 03-read-from-primary --from-literal=data="${data}"]
    logger.go:42: 13:55:48 | haproxy/3-read-from-primary | + source ../../functions
    logger.go:42: 13:55:48 | haproxy/3-read-from-primary | [vars.sh environment dump omitted: identical to step 0 above (ROOT_REPO through VAULT_VER exports, gdate/date detection, oc/minikube probes)]
    logger.go:42: 13:55:48 | haproxy/3-read-from-primary | ++++ get_cluster_name
    logger.go:42: 13:55:48 | haproxy/3-read-from-primary | ++++ kubectl -n kuttl-test-loving-racer get ps -o 'jsonpath={.items[0].metadata.name}'
    logger.go:42: 13:55:49 | haproxy/3-read-from-primary | +++ get_haproxy_svc haproxy
    logger.go:42: 13:55:49 | haproxy/3-read-from-primary | +++ local cluster=haproxy
    logger.go:42: 13:55:49 | haproxy/3-read-from-primary | +++ echo haproxy-haproxy
    logger.go:42: 13:55:49 | haproxy/3-read-from-primary | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h haproxy-haproxy -P3306 -uroot -proot_password'
    logger.go:42: 13:55:49 | haproxy/3-read-from-primary | ++ local 'command=SELECT * FROM myDB.myTable'
    logger.go:42: 13:55:49 | haproxy/3-read-from-primary | ++ local 'uri=-h haproxy-haproxy -P3306 -uroot -proot_password'
    logger.go:42: 13:55:49 | haproxy/3-read-from-primary | ++ local pod=
    logger.go:42: 13:55:49 | haproxy/3-read-from-primary | +++ get_client_pod
    logger.go:42: 13:55:49 | haproxy/3-read-from-primary | +++ kubectl -n kuttl-test-loving-racer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
    logger.go:42: 13:55:49 | haproxy/3-read-from-primary | ++ client_pod=mysql-client
    logger.go:42: 13:55:49 | haproxy/3-read-from-primary | ++ wait_pod mysql-client
    logger.go:42: 13:55:49 | haproxy/3-read-from-primary | ++ local pod=mysql-client
    logger.go:42: 13:55:49 | haproxy/3-read-from-primary | ++ set +o xtrace
    logger.go:42: 13:55:50 | haproxy/3-read-from-primary | mysql-clienttrue
    logger.go:42: 13:55:50 | haproxy/3-read-from-primary | ++ kubectl -n kuttl-test-loving-racer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h haproxy-haproxy -P3306 -uroot -proot_password'
    logger.go:42: 13:55:50 | haproxy/3-read-from-primary | ++ sed -e 's/mysql: //'
    logger.go:42: 13:55:50 | haproxy/3-read-from-primary | ++ grep -v 'Using a password on the command line interface can be insecure.'
    logger.go:42: 13:55:51 | haproxy/3-read-from-primary | + data=100500
    logger.go:42: 13:55:51 | haproxy/3-read-from-primary | + kubectl create configmap -n kuttl-test-loving-racer 03-read-from-primary --from-literal=data=100500
    logger.go:42: 13:55:51 | haproxy/3-read-from-primary | configmap/03-read-from-primary created
    logger.go:42: 13:55:52 | haproxy/3-read-from-primary | test step completed 3-read-from-primary
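Writing the query result into a ConfigMap is how these read steps hand data back to kuttl: the step's companion assert manifest (not shown in this log) can then match the ConfigMap declaratively instead of parsing output. The pattern, as executed above:

    data=$(run_mysql "SELECT * FROM myDB.myTable" \
        "-h $(get_haproxy_svc $(get_cluster_name)) -P3306 -uroot -proot_password")
    kubectl create configmap -n "${NAMESPACE}" 03-read-from-primary \
        --from-literal=data="${data}"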
    logger.go:42: 13:55:52 | haproxy/4-read-from-replicas | starting test step 4-read-from-replicas
    logger.go:42: 13:55:52 | haproxy/4-read-from-replicas | running command: [sh -c set -o errexit
        set -o xtrace

        source ../../functions

        data=$(run_mysql "SELECT * FROM myDB.myTable" "-h $(get_haproxy_svc $(get_cluster_name)) -P3307 -uroot -proot_password")
        kubectl create configmap -n "${NAMESPACE}" 04-read-from-replicas --from-literal=${test_name}-haproxy-replicas=${data}]
    logger.go:42: 13:55:52 | haproxy/4-read-from-replicas | + source ../../functions
    logger.go:42: 13:55:52 | haproxy/4-read-from-replicas | [vars.sh environment dump omitted: identical to step 0 above (ROOT_REPO through VAULT_VER exports, gdate/date detection, oc/minikube probes)]
    logger.go:42: 13:55:52 | haproxy/4-read-from-replicas | ++++ get_cluster_name
    logger.go:42: 13:55:52 | haproxy/4-read-from-replicas | ++++ kubectl -n kuttl-test-loving-racer get ps -o 'jsonpath={.items[0].metadata.name}'
    logger.go:42: 13:55:53 | haproxy/4-read-from-replicas | +++ get_haproxy_svc haproxy
    logger.go:42: 13:55:53 | haproxy/4-read-from-replicas | +++ local cluster=haproxy
    logger.go:42: 13:55:53 | haproxy/4-read-from-replicas | +++ echo haproxy-haproxy
    logger.go:42: 13:55:53 | haproxy/4-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h haproxy-haproxy -P3307 -uroot -proot_password'
    logger.go:42: 13:55:53 | haproxy/4-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable'
    logger.go:42: 13:55:53 | haproxy/4-read-from-replicas | ++ local 'uri=-h haproxy-haproxy -P3307 -uroot -proot_password'
    logger.go:42: 13:55:53 | haproxy/4-read-from-replicas | ++ local pod=
    logger.go:42: 13:55:53 | haproxy/4-read-from-replicas | +++ get_client_pod
    logger.go:42: 13:55:53 | haproxy/4-read-from-replicas | +++ kubectl -n kuttl-test-loving-racer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
    logger.go:42: 13:55:53 | haproxy/4-read-from-replicas | ++ client_pod=mysql-client
    logger.go:42: 13:55:53 | haproxy/4-read-from-replicas | ++ wait_pod mysql-client
    logger.go:42: 13:55:53 | haproxy/4-read-from-replicas | ++ local pod=mysql-client
    logger.go:42: 13:55:53 | haproxy/4-read-from-replicas | ++ set +o xtrace
    logger.go:42: 13:55:54 | haproxy/4-read-from-replicas | mysql-clienttrue
    logger.go:42: 13:55:54 | haproxy/4-read-from-replicas | ++ kubectl -n kuttl-test-loving-racer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h haproxy-haproxy -P3307 -uroot -proot_password'
    logger.go:42: 13:55:54 | haproxy/4-read-from-replicas | ++ sed -e 's/mysql: //'
    logger.go:42: 13:55:54 | haproxy/4-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.'
    logger.go:42: 13:55:55 | haproxy/4-read-from-replicas | + data=100500
    logger.go:42: 13:55:55 | haproxy/4-read-from-replicas | + kubectl create configmap -n kuttl-test-loving-racer 04-read-from-replicas --from-literal=haproxy-haproxy-replicas=100500
    logger.go:42: 13:55:55 | haproxy/4-read-from-replicas | configmap/04-read-from-replicas created
    logger.go:42: 13:55:56 | haproxy/4-read-from-replicas | test step completed 4-read-from-replicas
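Steps 3 and 4 query the same haproxy-haproxy Service on different ports: in the operator's HAProxy port layout, 3306 routes to the current primary while 3307 load-balances reads across replicas, so both reads returning 100500 shows the step-2 write has replicated. Equivalent direct client calls:

    # Read through the primary port.
    mysql -sN -h haproxy-haproxy -P3306 -uroot -proot_password -e 'SELECT * FROM myDB.myTable'
    # Read through the replicas port.
    mysql -sN -h haproxy-haproxy -P3307 -uroot -proot_password -e 'SELECT * FROM myDB.myTable'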
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-975
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | ++++ pwd
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/tests/haproxy
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | ++ test_name=haproxy
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/vars.sh
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-975
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-975
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/deploy
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/deploy
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/conf
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/conf
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ export TEMP_DIR=/tmp/kuttl/ps/haproxy
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ TEMP_DIR=/tmp/kuttl/ps/haproxy
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ export GIT_BRANCH=PR-975
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ GIT_BRANCH=PR-975
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ export VERSION=PR-975-d7710be1
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ VERSION=PR-975-d7710be1
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-975-d7710be1
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-975-d7710be1
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ export MINIO_VER=5.4.0
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ MINIO_VER=5.4.0
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ export VAULT_VER=0.16.1
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ VAULT_VER=0.16.1
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | ++++ which gdate
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-975/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | ++++ which date
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ date=/usr/sbin/date
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ oc get projects
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ :
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ kubectl get nodes
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ grep '^minikube'
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | ++ oc get projects
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | + data=()
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | ++ seq 0 2
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | + for i in $(seq 0 2)
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | + data+=("$(get_primary_from_haproxy ${test_name}-haproxy-$i)")
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | ++ get_primary_from_haproxy haproxy-haproxy-0
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | ++ local haproxy_pod=haproxy-haproxy-0
logger.go:42: 13:55:56 | haproxy/5-check-pods-have-same-primary | +++ kubectl -n kuttl-test-loving-racer get pods haproxy-haproxy-0 -o 'jsonpath={.status.podIP}'
logger.go:42: 13:55:57 | haproxy/5-check-pods-have-same-primary | ++ local haproxy_pod_ip=10.89.10.91
logger.go:42: 13:55:57 | haproxy/5-check-pods-have-same-primary | ++ run_mysql 'SHOW VARIABLES LIKE '\''%hostname%'\'';' '-h 10.89.10.91 -P3306 -uroot -proot_password'
logger.go:42: 13:55:57 | haproxy/5-check-pods-have-same-primary | ++ awk '{print $2}'
logger.go:42: 13:55:57 | haproxy/5-check-pods-have-same-primary | ++ local 'command=SHOW VARIABLES LIKE '\''%hostname%'\'';'
logger.go:42: 13:55:57 | haproxy/5-check-pods-have-same-primary | ++ local 'uri=-h 10.89.10.91 -P3306 -uroot -proot_password'
logger.go:42: 13:55:57 | haproxy/5-check-pods-have-same-primary | ++ local pod=
logger.go:42: 13:55:57 | haproxy/5-check-pods-have-same-primary | +++ get_client_pod
logger.go:42: 13:55:57 | haproxy/5-check-pods-have-same-primary | +++ kubectl -n kuttl-test-loving-racer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 13:55:57 | haproxy/5-check-pods-have-same-primary | ++ client_pod=mysql-client
logger.go:42: 13:55:57 | haproxy/5-check-pods-have-same-primary | ++ wait_pod mysql-client
logger.go:42: 13:55:57 | haproxy/5-check-pods-have-same-primary | ++ local pod=mysql-client
logger.go:42: 13:55:57 | haproxy/5-check-pods-have-same-primary | ++ set +o xtrace
logger.go:42: 13:55:58 | haproxy/5-check-pods-have-same-primary | mysql-clienttrue
logger.go:42: 13:55:58 | haproxy/5-check-pods-have-same-primary | ++ kubectl -n kuttl-test-loving-racer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW VARIABLES LIKE '\''%hostname%'\'';" | mysql -sN -h 10.89.10.91 -P3306 -uroot -proot_password'
logger.go:42: 13:55:58 | haproxy/5-check-pods-have-same-primary | ++ sed -e 's/mysql: //'
logger.go:42: 13:55:58 | haproxy/5-check-pods-have-same-primary | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 13:55:59 | haproxy/5-check-pods-have-same-primary | + for i in $(seq 0 2)
logger.go:42: 13:55:59 | haproxy/5-check-pods-have-same-primary | + data+=("$(get_primary_from_haproxy ${test_name}-haproxy-$i)")
logger.go:42: 13:55:59 | haproxy/5-check-pods-have-same-primary | ++ get_primary_from_haproxy haproxy-haproxy-1
logger.go:42: 13:55:59 | haproxy/5-check-pods-have-same-primary | ++ local haproxy_pod=haproxy-haproxy-1
logger.go:42: 13:55:59 | haproxy/5-check-pods-have-same-primary | +++ kubectl -n kuttl-test-loving-racer get pods haproxy-haproxy-1 -o 'jsonpath={.status.podIP}'
logger.go:42: 13:55:59 | haproxy/5-check-pods-have-same-primary | ++ local haproxy_pod_ip=10.89.9.56
logger.go:42: 13:55:59 | haproxy/5-check-pods-have-same-primary | ++ run_mysql 'SHOW VARIABLES LIKE '\''%hostname%'\'';' '-h 10.89.9.56 -P3306 -uroot -proot_password'
logger.go:42: 13:55:59 | haproxy/5-check-pods-have-same-primary | ++ awk '{print $2}'
logger.go:42: 13:55:59 | haproxy/5-check-pods-have-same-primary | ++ local 'command=SHOW VARIABLES LIKE '\''%hostname%'\'';'
logger.go:42: 13:55:59 | haproxy/5-check-pods-have-same-primary | ++ local 'uri=-h 10.89.9.56 -P3306 -uroot -proot_password'
logger.go:42: 13:55:59 | haproxy/5-check-pods-have-same-primary | ++ local pod=
logger.go:42: 13:55:59 | haproxy/5-check-pods-have-same-primary | +++ get_client_pod
logger.go:42: 13:55:59 | haproxy/5-check-pods-have-same-primary | +++ kubectl -n kuttl-test-loving-racer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 13:56:00 | haproxy/5-check-pods-have-same-primary | ++ client_pod=mysql-client
logger.go:42: 13:56:00 | haproxy/5-check-pods-have-same-primary | ++ wait_pod mysql-client
logger.go:42: 13:56:00 | haproxy/5-check-pods-have-same-primary | ++ local pod=mysql-client
logger.go:42: 13:56:00 | haproxy/5-check-pods-have-same-primary | ++ set +o xtrace
logger.go:42: 13:56:00 | haproxy/5-check-pods-have-same-primary | mysql-clienttrue
logger.go:42: 13:56:00 | haproxy/5-check-pods-have-same-primary | ++ kubectl -n kuttl-test-loving-racer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW VARIABLES LIKE '\''%hostname%'\'';" | mysql -sN -h 10.89.9.56 -P3306 -uroot -proot_password'
logger.go:42: 13:56:00 | haproxy/5-check-pods-have-same-primary | ++ sed -e 's/mysql: //'
logger.go:42: 13:56:00 | haproxy/5-check-pods-have-same-primary | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 13:56:01 | haproxy/5-check-pods-have-same-primary | + for i in $(seq 0 2)
logger.go:42: 13:56:01 | haproxy/5-check-pods-have-same-primary | + data+=("$(get_primary_from_haproxy ${test_name}-haproxy-$i)")
logger.go:42: 13:56:01 | haproxy/5-check-pods-have-same-primary | ++ get_primary_from_haproxy haproxy-haproxy-2
logger.go:42: 13:56:01 | haproxy/5-check-pods-have-same-primary | ++ local haproxy_pod=haproxy-haproxy-2
logger.go:42: 13:56:01 | haproxy/5-check-pods-have-same-primary | +++ kubectl -n kuttl-test-loving-racer get pods haproxy-haproxy-2 -o 'jsonpath={.status.podIP}'
logger.go:42: 13:56:02 | haproxy/5-check-pods-have-same-primary | ++ local haproxy_pod_ip=10.89.8.56
logger.go:42: 13:56:02 | haproxy/5-check-pods-have-same-primary | ++ run_mysql 'SHOW VARIABLES LIKE '\''%hostname%'\'';' '-h 10.89.8.56 -P3306 -uroot -proot_password'
logger.go:42: 13:56:02 | haproxy/5-check-pods-have-same-primary | ++ local 'command=SHOW VARIABLES LIKE '\''%hostname%'\'';'
logger.go:42: 13:56:02 | haproxy/5-check-pods-have-same-primary | ++ local 'uri=-h 10.89.8.56 -P3306 -uroot -proot_password'
logger.go:42: 13:56:02 | haproxy/5-check-pods-have-same-primary | ++ local pod=
logger.go:42: 13:56:02 | haproxy/5-check-pods-have-same-primary | ++ awk '{print $2}'
logger.go:42: 13:56:02 | haproxy/5-check-pods-have-same-primary | +++ get_client_pod
logger.go:42: 13:56:02 | haproxy/5-check-pods-have-same-primary | +++ kubectl -n kuttl-test-loving-racer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 13:56:02 | haproxy/5-check-pods-have-same-primary | ++ client_pod=mysql-client
logger.go:42: 13:56:02 | haproxy/5-check-pods-have-same-primary | ++ wait_pod mysql-client
logger.go:42: 13:56:02 | haproxy/5-check-pods-have-same-primary | ++ local pod=mysql-client
logger.go:42: 13:56:02 | haproxy/5-check-pods-have-same-primary | ++ set +o xtrace
logger.go:42: 13:56:02 | haproxy/5-check-pods-have-same-primary | mysql-clienttrue
logger.go:42: 13:56:02 | haproxy/5-check-pods-have-same-primary | ++ kubectl -n kuttl-test-loving-racer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW VARIABLES LIKE '\''%hostname%'\'';" | mysql -sN -h 10.89.8.56 -P3306 -uroot -proot_password'
logger.go:42: 13:56:02 | haproxy/5-check-pods-have-same-primary | ++ sed -e 's/mysql: //'
logger.go:42: 13:56:02 | haproxy/5-check-pods-have-same-primary | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 13:56:03 | haproxy/5-check-pods-have-same-primary | + '[' haproxy-mysql-0 '!=' haproxy-mysql-0 -o haproxy-mysql-0 '!=' haproxy-mysql-0 ']'
logger.go:42: 13:56:03 | haproxy/5-check-pods-have-same-primary | test step completed 5-check-pods-have-same-primary
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | starting test step 6-check-label-haproxy-primary
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | running command: [sh -c set -o errexit set -o xtrace source ../../functions primary_pod_from_label="$(get_primary_from_label)" primary_pod_from_haproxy="$(get_primary_from_haproxy ${test_name}-haproxy-0)" if [ "${primary_pod_from_label}" != "${primary_pod_from_haproxy}" ]; then echo "Primary in k8s label (${primary_pod_from_label}) is not set to same pod as in haproxy (${primary_pod_from_haproxy})!" exit 1 fi]
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | + source ../../functions
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ realpath ../../..
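The step-5 script is flattened onto one log line by the capture; reflowed for readability, its content (verbatim, including the obsolescent test -o operator) is:

    set -o errexit
    set -o xtrace
    source ../../functions

    data=()
    for i in $(seq 0 2); do
        data+=("$(get_primary_from_haproxy ${test_name}-haproxy-$i)")
    done

    if [ "${data[0]}" != "${data[1]}" -o "${data[1]}" != "${data[2]}" ]; then
        echo "Not all haproxy pods point to same primary: 0: ${data[0]} 1: ${data[1]} 2: ${data[2]}"
        exit 1
    fi

All three probes above returned haproxy-mysql-0, so the assertion passed.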
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-975
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | ++++ pwd
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/tests/haproxy
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | ++ test_name=haproxy
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/vars.sh
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-975
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-975
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/deploy
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/deploy
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/conf
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/conf
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ export TEMP_DIR=/tmp/kuttl/ps/haproxy
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ TEMP_DIR=/tmp/kuttl/ps/haproxy
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ export GIT_BRANCH=PR-975
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ GIT_BRANCH=PR-975
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ export VERSION=PR-975-d7710be1
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ VERSION=PR-975-d7710be1
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-975-d7710be1
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-975-d7710be1
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ export MINIO_VER=5.4.0
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ MINIO_VER=5.4.0
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ export VAULT_VER=0.16.1
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ VAULT_VER=0.16.1
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | ++++ which gdate
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-975/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | ++++ which date
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ date=/usr/sbin/date
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ oc get projects
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ :
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ kubectl get nodes
logger.go:42: 13:56:03 | haproxy/6-check-label-haproxy-primary | +++ grep '^minikube'
logger.go:42: 13:56:04 | haproxy/6-check-label-haproxy-primary | ++ oc get projects
logger.go:42: 13:56:04 | haproxy/6-check-label-haproxy-primary | ++ get_primary_from_label
logger.go:42: 13:56:04 | haproxy/6-check-label-haproxy-primary | ++ kubectl -n kuttl-test-loving-racer get pods -l mysql.percona.com/primary=true '-ojsonpath={.items[0].metadata.name}'
logger.go:42: 13:56:05 | haproxy/6-check-label-haproxy-primary | + primary_pod_from_label=haproxy-mysql-0
logger.go:42: 13:56:05 | haproxy/6-check-label-haproxy-primary | ++ get_primary_from_haproxy haproxy-haproxy-0
logger.go:42: 13:56:05 | haproxy/6-check-label-haproxy-primary | ++ local haproxy_pod=haproxy-haproxy-0
logger.go:42: 13:56:05 | haproxy/6-check-label-haproxy-primary | +++ kubectl -n kuttl-test-loving-racer get pods haproxy-haproxy-0 -o 'jsonpath={.status.podIP}'
logger.go:42: 13:56:05 | haproxy/6-check-label-haproxy-primary | ++ local haproxy_pod_ip=10.89.10.91
logger.go:42: 13:56:05 | haproxy/6-check-label-haproxy-primary | ++ run_mysql 'SHOW VARIABLES LIKE '\''%hostname%'\'';' '-h 10.89.10.91 -P3306 -uroot -proot_password'
logger.go:42: 13:56:05 | haproxy/6-check-label-haproxy-primary | ++ local 'command=SHOW VARIABLES LIKE '\''%hostname%'\'';'
logger.go:42: 13:56:05 | haproxy/6-check-label-haproxy-primary | ++ local 'uri=-h 10.89.10.91 -P3306 -uroot -proot_password'
logger.go:42: 13:56:05 | haproxy/6-check-label-haproxy-primary | ++ local pod=
logger.go:42: 13:56:05 | haproxy/6-check-label-haproxy-primary | ++ awk '{print $2}'
logger.go:42: 13:56:05 | haproxy/6-check-label-haproxy-primary | +++ get_client_pod
logger.go:42: 13:56:05 | haproxy/6-check-label-haproxy-primary | +++ kubectl -n kuttl-test-loving-racer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 13:56:06 | haproxy/6-check-label-haproxy-primary | ++ client_pod=mysql-client
logger.go:42: 13:56:06 | haproxy/6-check-label-haproxy-primary | ++ wait_pod mysql-client
logger.go:42: 13:56:06 | haproxy/6-check-label-haproxy-primary | ++ local pod=mysql-client
logger.go:42: 13:56:06 | haproxy/6-check-label-haproxy-primary | ++ set +o xtrace
logger.go:42: 13:56:06 | haproxy/6-check-label-haproxy-primary | mysql-clienttrue
logger.go:42: 13:56:06 | haproxy/6-check-label-haproxy-primary | ++ kubectl -n kuttl-test-loving-racer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW VARIABLES LIKE '\''%hostname%'\'';" | mysql -sN -h 10.89.10.91 -P3306 -uroot -proot_password'
logger.go:42: 13:56:06 | haproxy/6-check-label-haproxy-primary | ++ sed -e 's/mysql: //'
logger.go:42: 13:56:06 | haproxy/6-check-label-haproxy-primary | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 13:56:07 | haproxy/6-check-label-haproxy-primary | + primary_pod_from_haproxy=haproxy-mysql-0
logger.go:42: 13:56:07 | haproxy/6-check-label-haproxy-primary | + '[' haproxy-mysql-0 '!=' haproxy-mysql-0 ']'
logger.go:42: 13:56:07 | haproxy/6-check-label-haproxy-primary | test step completed 6-check-label-haproxy-primary
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | starting test step 7-check-primary-failover
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | running command: [sh -c set -o errexit set -o xtrace source ../../functions primary_pod_from_label="$(get_primary_from_label)" kubectl -n "${NAMESPACE}" delete pod ${primary_pod_from_label} sleep 3 data=() for i in $(seq 0 2); do data+=("$(get_primary_from_haproxy ${test_name}-haproxy-$i)") done if [ "${data[0]}" != "${data[1]}" -o "${data[1]}" != "${data[2]}" ]; then echo "Not all haproxy pods point to same primary: 0: ${data[0]} 1: ${data[1]} 2: ${data[2]}" exit 1 fi primary_pod_from_label="$(get_primary_from_label)" primary_pod_from_haproxy="$(get_primary_from_haproxy ${test_name}-haproxy-0)" if [ "${primary_pod_from_label}" != "${primary_pod_from_haproxy}" ]; then echo "Primary in k8s label (${primary_pod_from_label}) is not set to same pod as in haproxy (${primary_pod_from_haproxy})!" exit 1 fi]
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | + source ../../functions
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ realpath ../../..
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-975
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | ++++ pwd
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/tests/haproxy
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | ++ test_name=haproxy
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/vars.sh
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-975
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-975
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/deploy
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/deploy
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/conf
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/conf
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ export TEMP_DIR=/tmp/kuttl/ps/haproxy
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ TEMP_DIR=/tmp/kuttl/ps/haproxy
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ export GIT_BRANCH=PR-975
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ GIT_BRANCH=PR-975
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ export VERSION=PR-975-d7710be1
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ VERSION=PR-975-d7710be1
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-975-d7710be1
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-975-d7710be1
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ export MINIO_VER=5.4.0
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ MINIO_VER=5.4.0
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ export VAULT_VER=0.16.1
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ VAULT_VER=0.16.1
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | ++++ which gdate
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-975/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | ++++ which date
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ date=/usr/sbin/date
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ oc get projects
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ :
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ kubectl get nodes
logger.go:42: 13:56:07 | haproxy/7-check-primary-failover | +++ grep '^minikube'
logger.go:42: 13:56:08 | haproxy/7-check-primary-failover | ++ oc get projects
logger.go:42: 13:56:08 | haproxy/7-check-primary-failover | ++ get_primary_from_label
logger.go:42: 13:56:08 | haproxy/7-check-primary-failover | ++ kubectl -n kuttl-test-loving-racer get pods -l mysql.percona.com/primary=true '-ojsonpath={.items[0].metadata.name}'
logger.go:42: 13:56:08 | haproxy/7-check-primary-failover | + primary_pod_from_label=haproxy-mysql-0
logger.go:42: 13:56:08 | haproxy/7-check-primary-failover | + kubectl -n kuttl-test-loving-racer delete pod haproxy-mysql-0
logger.go:42: 13:56:09 | haproxy/7-check-primary-failover | pod "haproxy-mysql-0" deleted
logger.go:42: 13:56:29 | haproxy/7-check-primary-failover | + sleep 3
logger.go:42: 13:56:32 | haproxy/7-check-primary-failover | + data=()
logger.go:42: 13:56:32 | haproxy/7-check-primary-failover | ++ seq 0 2
logger.go:42: 13:56:32 | haproxy/7-check-primary-failover | + for i in $(seq 0 2)
logger.go:42: 13:56:32 | haproxy/7-check-primary-failover | + data+=("$(get_primary_from_haproxy ${test_name}-haproxy-$i)")
logger.go:42: 13:56:32 | haproxy/7-check-primary-failover | ++ get_primary_from_haproxy haproxy-haproxy-0
logger.go:42: 13:56:32 | haproxy/7-check-primary-failover | ++ local haproxy_pod=haproxy-haproxy-0
logger.go:42: 13:56:32 | haproxy/7-check-primary-failover | +++ kubectl -n kuttl-test-loving-racer get pods haproxy-haproxy-0 -o 'jsonpath={.status.podIP}'
logger.go:42: 13:56:33 | haproxy/7-check-primary-failover | ++ local haproxy_pod_ip=10.89.10.91
logger.go:42: 13:56:33 | haproxy/7-check-primary-failover | ++ run_mysql 'SHOW VARIABLES LIKE '\''%hostname%'\'';' '-h 10.89.10.91 -P3306 -uroot -proot_password'
logger.go:42: 13:56:33 | haproxy/7-check-primary-failover | ++ local 'command=SHOW VARIABLES LIKE '\''%hostname%'\'';'
logger.go:42: 13:56:33 | haproxy/7-check-primary-failover | ++ local 'uri=-h 10.89.10.91 -P3306 -uroot -proot_password'
logger.go:42: 13:56:33 | haproxy/7-check-primary-failover | ++ local pod=
logger.go:42: 13:56:33 | haproxy/7-check-primary-failover | ++ awk '{print $2}'
logger.go:42: 13:56:33 | haproxy/7-check-primary-failover | +++ get_client_pod
logger.go:42: 13:56:33 | haproxy/7-check-primary-failover | +++ kubectl -n kuttl-test-loving-racer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 13:56:33 | haproxy/7-check-primary-failover | ++ client_pod=mysql-client
logger.go:42: 13:56:33 | haproxy/7-check-primary-failover | ++ wait_pod mysql-client
logger.go:42: 13:56:33 | haproxy/7-check-primary-failover | ++ local pod=mysql-client
logger.go:42: 13:56:33 | haproxy/7-check-primary-failover | ++ set +o xtrace
logger.go:42: 13:56:34 | haproxy/7-check-primary-failover | mysql-clienttrue
logger.go:42: 13:56:34 | haproxy/7-check-primary-failover | ++ kubectl -n kuttl-test-loving-racer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW VARIABLES LIKE '\''%hostname%'\'';" | mysql -sN -h 10.89.10.91 -P3306 -uroot -proot_password'
logger.go:42: 13:56:34 | haproxy/7-check-primary-failover | ++ sed -e 's/mysql: //'
logger.go:42: 13:56:34 | haproxy/7-check-primary-failover | ++ grep -v 'Using a password on the command line interface can be insecure.'
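Step 7 looks up the primary through the operator-managed label, deletes that pod, and then re-polls every HAProxy pod. The label lookup in the trace amounts to the following sketch (reconstructed from the xtrace, assuming NAMESPACE; the operator labels the current primary pod with mysql.percona.com/primary=true):

    # Sketch of get_primary_from_label as implied by the xtrace.
    get_primary_from_label() {
        kubectl -n "${NAMESPACE}" get pods \
            -l mysql.percona.com/primary=true \
            -o 'jsonpath={.items[0].metadata.name}'
    }

Note the timestamps above: the delete is issued at 13:56:09 but the script resumes at 13:56:29, because kubectl delete blocks until the pod is actually terminated before the script's sleep 3 even starts.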
logger.go:42: 13:56:35 | haproxy/7-check-primary-failover | + for i in $(seq 0 2)
logger.go:42: 13:56:35 | haproxy/7-check-primary-failover | + data+=("$(get_primary_from_haproxy ${test_name}-haproxy-$i)")
logger.go:42: 13:56:35 | haproxy/7-check-primary-failover | ++ get_primary_from_haproxy haproxy-haproxy-1
logger.go:42: 13:56:35 | haproxy/7-check-primary-failover | ++ local haproxy_pod=haproxy-haproxy-1
logger.go:42: 13:56:35 | haproxy/7-check-primary-failover | +++ kubectl -n kuttl-test-loving-racer get pods haproxy-haproxy-1 -o 'jsonpath={.status.podIP}'
logger.go:42: 13:56:35 | haproxy/7-check-primary-failover | ++ local haproxy_pod_ip=10.89.9.56
logger.go:42: 13:56:35 | haproxy/7-check-primary-failover | ++ run_mysql 'SHOW VARIABLES LIKE '\''%hostname%'\'';' '-h 10.89.9.56 -P3306 -uroot -proot_password'
logger.go:42: 13:56:35 | haproxy/7-check-primary-failover | ++ awk '{print $2}'
logger.go:42: 13:56:35 | haproxy/7-check-primary-failover | ++ local 'command=SHOW VARIABLES LIKE '\''%hostname%'\'';'
logger.go:42: 13:56:35 | haproxy/7-check-primary-failover | ++ local 'uri=-h 10.89.9.56 -P3306 -uroot -proot_password'
logger.go:42: 13:56:35 | haproxy/7-check-primary-failover | ++ local pod=
logger.go:42: 13:56:35 | haproxy/7-check-primary-failover | +++ get_client_pod
logger.go:42: 13:56:35 | haproxy/7-check-primary-failover | +++ kubectl -n kuttl-test-loving-racer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 13:56:36 | haproxy/7-check-primary-failover | ++ client_pod=mysql-client
logger.go:42: 13:56:36 | haproxy/7-check-primary-failover | ++ wait_pod mysql-client
logger.go:42: 13:56:36 | haproxy/7-check-primary-failover | ++ local pod=mysql-client
logger.go:42: 13:56:36 | haproxy/7-check-primary-failover | ++ set +o xtrace
logger.go:42: 13:56:36 | haproxy/7-check-primary-failover | mysql-clienttrue
logger.go:42: 13:56:36 | haproxy/7-check-primary-failover | ++ kubectl -n kuttl-test-loving-racer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW VARIABLES LIKE '\''%hostname%'\'';" | mysql -sN -h 10.89.9.56 -P3306 -uroot -proot_password'
logger.go:42: 13:56:36 | haproxy/7-check-primary-failover | ++ sed -e 's/mysql: //'
logger.go:42: 13:56:36 | haproxy/7-check-primary-failover | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 13:56:37 | haproxy/7-check-primary-failover | + for i in $(seq 0 2)
logger.go:42: 13:56:37 | haproxy/7-check-primary-failover | + data+=("$(get_primary_from_haproxy ${test_name}-haproxy-$i)")
logger.go:42: 13:56:37 | haproxy/7-check-primary-failover | ++ get_primary_from_haproxy haproxy-haproxy-2
logger.go:42: 13:56:37 | haproxy/7-check-primary-failover | ++ local haproxy_pod=haproxy-haproxy-2
logger.go:42: 13:56:37 | haproxy/7-check-primary-failover | +++ kubectl -n kuttl-test-loving-racer get pods haproxy-haproxy-2 -o 'jsonpath={.status.podIP}'
logger.go:42: 13:56:38 | haproxy/7-check-primary-failover | ++ local haproxy_pod_ip=10.89.8.56
logger.go:42: 13:56:38 | haproxy/7-check-primary-failover | ++ run_mysql 'SHOW VARIABLES LIKE '\''%hostname%'\'';' '-h 10.89.8.56 -P3306 -uroot -proot_password'
logger.go:42: 13:56:38 | haproxy/7-check-primary-failover | ++ awk '{print $2}'
logger.go:42: 13:56:38 | haproxy/7-check-primary-failover | ++ local 'command=SHOW VARIABLES LIKE '\''%hostname%'\'';'
logger.go:42: 13:56:38 | haproxy/7-check-primary-failover | ++ local 'uri=-h 10.89.8.56 -P3306 -uroot -proot_password'
logger.go:42: 13:56:38 | haproxy/7-check-primary-failover | ++ local pod=
logger.go:42: 13:56:38 | haproxy/7-check-primary-failover | +++ get_client_pod
logger.go:42: 13:56:38 | haproxy/7-check-primary-failover | +++ kubectl -n kuttl-test-loving-racer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 13:56:38 | haproxy/7-check-primary-failover | ++ client_pod=mysql-client
logger.go:42: 13:56:38 | haproxy/7-check-primary-failover | ++ wait_pod mysql-client
logger.go:42: 13:56:38 | haproxy/7-check-primary-failover | ++ local pod=mysql-client
logger.go:42: 13:56:38 | haproxy/7-check-primary-failover | ++ set +o xtrace
logger.go:42: 13:56:38 | haproxy/7-check-primary-failover | mysql-clienttrue
logger.go:42: 13:56:38 | haproxy/7-check-primary-failover | ++ kubectl -n kuttl-test-loving-racer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW VARIABLES LIKE '\''%hostname%'\'';" | mysql -sN -h 10.89.8.56 -P3306 -uroot -proot_password'
logger.go:42: 13:56:38 | haproxy/7-check-primary-failover | ++ sed -e 's/mysql: //'
logger.go:42: 13:56:38 | haproxy/7-check-primary-failover | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 13:56:39 | haproxy/7-check-primary-failover | + '[' haproxy-mysql-2 '!=' haproxy-mysql-2 -o haproxy-mysql-2 '!=' haproxy-mysql-2 ']'
logger.go:42: 13:56:39 | haproxy/7-check-primary-failover | ++ get_primary_from_label
logger.go:42: 13:56:39 | haproxy/7-check-primary-failover | ++ kubectl -n kuttl-test-loving-racer get pods -l mysql.percona.com/primary=true '-ojsonpath={.items[0].metadata.name}'
logger.go:42: 13:56:40 | haproxy/7-check-primary-failover | + primary_pod_from_label=haproxy-mysql-2
logger.go:42: 13:56:40 | haproxy/7-check-primary-failover | ++ get_primary_from_haproxy haproxy-haproxy-0
logger.go:42: 13:56:40 | haproxy/7-check-primary-failover | ++ local haproxy_pod=haproxy-haproxy-0
logger.go:42: 13:56:40 | haproxy/7-check-primary-failover | +++ kubectl -n kuttl-test-loving-racer get pods haproxy-haproxy-0 -o 'jsonpath={.status.podIP}'
logger.go:42: 13:56:41 | haproxy/7-check-primary-failover | ++ local haproxy_pod_ip=10.89.10.91
logger.go:42: 13:56:41 | haproxy/7-check-primary-failover | ++ run_mysql 'SHOW VARIABLES LIKE '\''%hostname%'\'';' '-h 10.89.10.91 -P3306 -uroot -proot_password'
logger.go:42: 13:56:41 | haproxy/7-check-primary-failover | ++ local 'command=SHOW VARIABLES LIKE '\''%hostname%'\'';'
logger.go:42: 13:56:41 | haproxy/7-check-primary-failover | ++ local 'uri=-h 10.89.10.91 -P3306 -uroot -proot_password'
logger.go:42: 13:56:41 | haproxy/7-check-primary-failover | ++ local pod=
logger.go:42: 13:56:41 | haproxy/7-check-primary-failover | ++ awk '{print $2}'
logger.go:42: 13:56:41 | haproxy/7-check-primary-failover | +++ get_client_pod
logger.go:42: 13:56:41 | haproxy/7-check-primary-failover | +++ kubectl -n kuttl-test-loving-racer get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 13:56:41 | haproxy/7-check-primary-failover | ++ client_pod=mysql-client
logger.go:42: 13:56:41 | haproxy/7-check-primary-failover | ++ wait_pod mysql-client
logger.go:42: 13:56:41 | haproxy/7-check-primary-failover | ++ local pod=mysql-client
logger.go:42: 13:56:41 | haproxy/7-check-primary-failover | ++ set +o xtrace
logger.go:42: 13:56:42 | haproxy/7-check-primary-failover | mysql-clienttrue
logger.go:42: 13:56:42 | haproxy/7-check-primary-failover | ++ kubectl -n kuttl-test-loving-racer exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SHOW VARIABLES LIKE '\''%hostname%'\'';" | mysql -sN -h 10.89.10.91 -P3306 -uroot -proot_password'
logger.go:42: 13:56:42 | haproxy/7-check-primary-failover | ++ sed -e 's/mysql: //'
logger.go:42: 13:56:42 | haproxy/7-check-primary-failover | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 13:56:42 | haproxy/7-check-primary-failover | + primary_pod_from_haproxy=haproxy-mysql-2
logger.go:42: 13:56:42 | haproxy/7-check-primary-failover | + '[' haproxy-mysql-2 '!=' haproxy-mysql-2 ']'
logger.go:42: 13:56:42 | haproxy/7-check-primary-failover | test step completed 7-check-primary-failover
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | starting test step 8-check-password-leak
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | running command: [sh -c set -o errexit set -o xtrace source ../../functions # Temporarily skipping this check # check_passwords_leak]
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | + source ../../functions
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ realpath ../../..
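After the failover, both the label and all three HAProxy pods agree on haproxy-mysql-2 as the new primary. Step 8 deliberately skips check_passwords_leak; its real implementation in e2e-tests/functions is not shown anywhere in this log, so the following is a purely hypothetical sketch of what such a scan could look like (the function name, secret name test-secrets, and field .data.root are all assumptions):

    # Hypothetical sketch only -- not the repository's check_passwords_leak.
    check_passwords_leak_sketch() {
        local secret pod
        # decode a secret value that must never appear in pod logs
        secret=$(kubectl -n "${NAMESPACE}" get secret test-secrets \
            -o 'jsonpath={.data.root}' | base64 -d)
        for pod in $(kubectl -n "${NAMESPACE}" get pods -o 'jsonpath={.items[*].metadata.name}'); do
            if kubectl -n "${NAMESPACE}" logs "${pod}" --all-containers \
                | grep -qF -- "${secret}"; then
                echo "password leaked in logs of ${pod}"
                return 1
            fi
        done
    }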
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-975
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | ++++ pwd
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/tests/haproxy
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | ++ test_name=haproxy
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/vars.sh
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-975
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-975
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/deploy
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/deploy
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/conf
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/conf
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ export TEMP_DIR=/tmp/kuttl/ps/haproxy
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ TEMP_DIR=/tmp/kuttl/ps/haproxy
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ export GIT_BRANCH=PR-975
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ GIT_BRANCH=PR-975
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ export VERSION=PR-975-d7710be1
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ VERSION=PR-975-d7710be1
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-975-d7710be1
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-975-d7710be1
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ export MINIO_VER=5.4.0
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ MINIO_VER=5.4.0
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ export VAULT_VER=0.16.1
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ VAULT_VER=0.16.1
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | ++++ which gdate
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-975/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | ++++ which date
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ date=/usr/sbin/date
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ oc get projects
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ :
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ kubectl get nodes
logger.go:42: 13:56:42 | haproxy/8-check-password-leak | +++ grep '^minikube'
logger.go:42: 13:56:43 | haproxy/8-check-password-leak | ++ oc get projects
logger.go:42: 13:56:43 | haproxy/8-check-password-leak | test step completed 8-check-password-leak
logger.go:42: 13:56:43 | haproxy/9-disable-haproxy | starting test step 9-disable-haproxy
logger.go:42: 13:56:44 | haproxy/9-disable-haproxy | PerconaServerMySQL:kuttl-test-loving-racer/haproxy updated
logger.go:42: 13:57:30 | haproxy/9-disable-haproxy | test step completed 9-disable-haproxy
logger.go:42: 13:57:30 | haproxy/98-drop-finalizer | starting test step 98-drop-finalizer
logger.go:42: 13:57:31 | haproxy/98-drop-finalizer | PerconaServerMySQL:kuttl-test-loving-racer/haproxy updated
logger.go:42: 13:57:31 | haproxy/98-drop-finalizer | test step completed 98-drop-finalizer
logger.go:42: 13:57:31 | haproxy/99-remove-cluster-gracefully | starting test step 99-remove-cluster-gracefully
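Steps 9 and 98 update the PerconaServerMySQL custom resource via kuttl step files that are not shown in this log; only the "updated" confirmations appear. Roughly equivalent imperative commands would look like the following sketch (the exact field paths are assumptions about the CR layout; ps is the resource short name the log itself uses with kubectl get ps):

    # 9-disable-haproxy: turn the HAProxy proxy off in the CR spec (assumed path).
    kubectl -n "${NAMESPACE}" patch ps haproxy --type=merge \
        -p '{"spec":{"proxy":{"haproxy":{"enabled":false}}}}'
    # 98-drop-finalizer: clear finalizers so the later deletion cannot block.
    kubectl -n "${NAMESPACE}" patch ps haproxy --type=merge \
        -p '{"metadata":{"finalizers":[]}}'

Step 99, traced below, then tears down the operator itself via destroy_operator.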
logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_operator] logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | + source ../../functions logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ realpath ../../.. logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-975 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | ++++ pwd logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/tests/haproxy logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | ++ test_name=haproxy logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/vars.sh logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-975 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-975 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/deploy logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/deploy logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/conf logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-975/e2e-tests/conf logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ export TEMP_DIR=/tmp/kuttl/ps/haproxy logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ TEMP_DIR=/tmp/kuttl/ps/haproxy logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ export GIT_BRANCH=PR-975 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ GIT_BRANCH=PR-975 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ export VERSION=PR-975-d7710be1 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ VERSION=PR-975-d7710be1 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-975-d7710be1 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-975-d7710be1 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0 logger.go:42: 13:57:32 | 
haproxy/99-remove-cluster-gracefully | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ export CERT_MANAGER_VER=1.17.2 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ CERT_MANAGER_VER=1.17.2 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ export MINIO_VER=5.4.0 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ MINIO_VER=5.4.0 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ export VAULT_VER=0.16.1 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ VAULT_VER=0.16.1 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | ++++ which gdate logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-975/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | ++++ which date logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ date=/usr/sbin/date logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ oc get projects logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ : logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ kubectl get nodes logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | +++ grep '^minikube' logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | ++ oc get projects logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | + destroy_operator logger.go:42: 13:57:32 | 
haproxy/99-remove-cluster-gracefully | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0 logger.go:42: 13:57:32 | haproxy/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 13:57:33 | haproxy/99-remove-cluster-gracefully | deployment.apps "percona-server-mysql-operator" force deleted logger.go:42: 13:57:33 | haproxy/99-remove-cluster-gracefully | + [[ -n ps-operator ]] logger.go:42: 13:57:33 | haproxy/99-remove-cluster-gracefully | + kubectl delete namespace ps-operator --force --grace-period=0 logger.go:42: 13:57:33 | haproxy/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 13:57:33 | haproxy/99-remove-cluster-gracefully | namespace "ps-operator" force deleted logger.go:42: 13:57:40 | haproxy/99-remove-cluster-gracefully | test step completed 99-remove-cluster-gracefully logger.go:42: 13:57:41 | haproxy | haproxy events from ns kuttl-test-loving-racer: logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:12 +0000 UTC Normal Pod mysql-client Binding Scheduled Successfully assigned kuttl-test-loving-racer/mysql-client to gke-jen-ps-975-d7710be1--default-pool-9eec0249-7rq4 default-scheduler logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:13 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Container image "perconalab/percona-server-mysql-operator:main-psmysql8.0" already present on machine kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:13 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container: mysql-client kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:13 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:24 +0000 UTC Normal PersistentVolumeClaim datadir-haproxy-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:24 +0000 UTC Normal PersistentVolumeClaim datadir-haproxy-mysql-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
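
The teardown in the step above is the standard force-delete pattern: --force --grace-period=0 removes the API objects immediately instead of waiting for pod termination, which is why kubectl prints the "Immediate deletion does not wait..." warning twice. A minimal sketch of what destroy_operator evidently runs, reconstructed from the xtrace (the OPERATOR_NS variable name is an assumption; the trace itself only shows the literal ps-operator and the [[ -n ps-operator ]] guard):

    # Sketch inferred from the xtrace above; not the verbatim helper from ../../functions.
    # OPERATOR_NS is assumed; the logged run had it set to "ps-operator".
    destroy_operator() {
        kubectl -n "${OPERATOR_NS}" delete deployment percona-server-mysql-operator \
            --force --grace-period=0
        if [[ -n "${OPERATOR_NS}" ]]; then
            kubectl delete namespace "${OPERATOR_NS}" --force --grace-period=0
        fi
    }
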
persistentvolume-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:24 +0000 UTC Normal PersistentVolumeClaim datadir-haproxy-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-loving-racer/datadir-haproxy-mysql-0" pd.csi.storage.gke.io_gke-3493d58ee4384541b56b-375f-663e-vm_82e01f07-e480-4cf0-b4f0-184071d33332 logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:24 +0000 UTC Normal StatefulSet.apps haproxy-mysql SuccessfulCreate create Claim datadir-haproxy-mysql-0 Pod haproxy-mysql-0 in StatefulSet haproxy-mysql success statefulset-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:24 +0000 UTC Normal StatefulSet.apps haproxy-mysql SuccessfulCreate create Pod haproxy-mysql-0 in StatefulSet haproxy-mysql successful statefulset-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:25 +0000 UTC Normal Pod haproxy-orc-0 Binding Scheduled Successfully assigned kuttl-test-loving-racer/haproxy-orc-0 to gke-jen-ps-975-d7710be1--default-pool-9eec0249-6cxs default-scheduler logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:25 +0000 UTC Normal StatefulSet.apps haproxy-orc SuccessfulCreate create Pod haproxy-orc-0 in StatefulSet haproxy-orc successful statefulset-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:25 +0000 UTC Normal PodDisruptionBudget.policy haproxy-orchestrator NoPods No matching pods found controllermanager logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:26 +0000 UTC Normal Pod haproxy-orc-0.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-975-d7710be1" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:26 +0000 UTC Normal Pod haproxy-orc-0.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-975-d7710be1" in 144ms (144ms including waiting). Image size: 108990256 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:26 +0000 UTC Normal Pod haproxy-orc-0.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:26 +0000 UTC Normal Pod haproxy-orc-0.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:26 +0000 UTC Warning PerconaServerMySQL.ps.percona.com haproxy ClusterStateChanged Error -> Initializing ps-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:28 +0000 UTC Normal PersistentVolumeClaim datadir-haproxy-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-d6d353b4-dc9e-4c1c-87d4-968d22d206b7 pd.csi.storage.gke.io_gke-3493d58ee4384541b56b-375f-663e-vm_82e01f07-e480-4cf0-b4f0-184071d33332 logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:28 +0000 UTC Normal Pod haproxy-orc-0.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:28 +0000 UTC Normal Pod haproxy-orc-0.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 94ms (94ms including waiting). Image size: 72469809 bytes. 
kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:28 +0000 UTC Normal Pod haproxy-orc-0.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:28 +0000 UTC Normal Pod haproxy-orc-0.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:28 +0000 UTC Normal Pod haproxy-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:28 +0000 UTC Normal Pod haproxy-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 102ms (102ms including waiting). Image size: 72469809 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:28 +0000 UTC Normal Pod haproxy-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:28 +0000 UTC Normal Pod haproxy-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:29 +0000 UTC Normal Pod haproxy-mysql-0 Binding Scheduled Successfully assigned kuttl-test-loving-racer/haproxy-mysql-0 to gke-jen-ps-975-d7710be1--default-pool-9eec0249-7rq4 default-scheduler logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:37 +0000 UTC Normal Pod haproxy-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-d6d353b4-dc9e-4c1c-87d4-968d22d206b7" attachdetach-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:38 +0000 UTC Normal Pod haproxy-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-975-d7710be1" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:38 +0000 UTC Normal Pod haproxy-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-975-d7710be1" in 179ms (179ms including waiting). Image size: 108990256 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:38 +0000 UTC Normal Pod haproxy-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:38 +0000 UTC Normal Pod haproxy-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:41 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:41 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 103ms (103ms including waiting). Image size: 437180759 bytes. 
kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:41 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:41 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:41 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:41 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 103ms (103ms including waiting). Image size: 428922326 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:41 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:41 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:41 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:41 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 103ms (103ms including waiting). Image size: 132941919 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:41 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:51:41 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:01 +0000 UTC Normal Pod haproxy-orc-1 Binding Scheduled Successfully assigned kuttl-test-loving-racer/haproxy-orc-1 to gke-jen-ps-975-d7710be1--default-pool-9eec0249-xh68 default-scheduler logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:01 +0000 UTC Normal Pod haproxy-orc-1.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-975-d7710be1" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:01 +0000 UTC Normal StatefulSet.apps haproxy-orc SuccessfulCreate create Pod haproxy-orc-1 in StatefulSet haproxy-orc successful statefulset-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:02 +0000 UTC Normal Pod haproxy-orc-1.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-975-d7710be1" in 135ms (135ms including waiting). Image size: 108990256 bytes. 
kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:02 +0000 UTC Normal Pod haproxy-orc-1.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:02 +0000 UTC Normal Pod haproxy-orc-1.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:03 +0000 UTC Normal Pod haproxy-orc-1.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:03 +0000 UTC Normal Pod haproxy-orc-1.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 110ms (110ms including waiting). Image size: 72469809 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:03 +0000 UTC Normal Pod haproxy-orc-1.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:03 +0000 UTC Normal Pod haproxy-orc-1.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:03 +0000 UTC Normal Pod haproxy-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:03 +0000 UTC Normal Pod haproxy-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 105ms (105ms including waiting). Image size: 72469809 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:03 +0000 UTC Normal Pod haproxy-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:03 +0000 UTC Normal Pod haproxy-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:13 +0000 UTC Normal PersistentVolumeClaim datadir-haproxy-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:13 +0000 UTC Normal PersistentVolumeClaim datadir-haproxy-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
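
Each datadir-haproxy-mysql-N claim above walks the same four-event lifecycle: WaitForFirstConsumer (binding is deferred until a pod is scheduled), ExternalProvisioning, Provisioning by the pd.csi.storage.gke.io driver, and ProvisioningSucceeded. If a claim ever stalls at ExternalProvisioning, a quick way to watch it is something like the following (namespace and claim names taken from this run):

    # Watch the claim transition from Pending to Bound, then inspect its events.
    kubectl -n kuttl-test-loving-racer get pvc datadir-haproxy-mysql-1 -w
    kubectl -n kuttl-test-loving-racer describe pvc datadir-haproxy-mysql-1
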
persistentvolume-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:13 +0000 UTC Normal PersistentVolumeClaim datadir-haproxy-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-loving-racer/datadir-haproxy-mysql-1" pd.csi.storage.gke.io_gke-3493d58ee4384541b56b-375f-663e-vm_82e01f07-e480-4cf0-b4f0-184071d33332 logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:13 +0000 UTC Normal StatefulSet.apps haproxy-mysql SuccessfulCreate create Claim datadir-haproxy-mysql-1 Pod haproxy-mysql-1 in StatefulSet haproxy-mysql success statefulset-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:13 +0000 UTC Normal StatefulSet.apps haproxy-mysql SuccessfulCreate create Pod haproxy-mysql-1 in StatefulSet haproxy-mysql successful statefulset-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:17 +0000 UTC Normal PersistentVolumeClaim datadir-haproxy-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-d1716a7c-5b16-427a-8e6e-c3e68a4b90a8 pd.csi.storage.gke.io_gke-3493d58ee4384541b56b-375f-663e-vm_82e01f07-e480-4cf0-b4f0-184071d33332 logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:17 +0000 UTC Normal Pod haproxy-mysql-1 Binding Scheduled Successfully assigned kuttl-test-loving-racer/haproxy-mysql-1 to gke-jen-ps-975-d7710be1--default-pool-9eec0249-6cxs default-scheduler logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:18 +0000 UTC Normal Pod haproxy-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-loving-racer/haproxy-haproxy-0 to gke-jen-ps-975-d7710be1--default-pool-9eec0249-7rq4 default-scheduler logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:18 +0000 UTC Normal Pod haproxy-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-975-d7710be1" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:18 +0000 UTC Normal Pod haproxy-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-975-d7710be1" in 135ms (135ms including waiting). Image size: 108990256 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:18 +0000 UTC Normal Pod haproxy-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:18 +0000 UTC Normal Pod haproxy-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:18 +0000 UTC Normal StatefulSet.apps haproxy-haproxy SuccessfulCreate create Pod haproxy-haproxy-0 in StatefulSet haproxy-haproxy successful statefulset-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:21 +0000 UTC Normal Pod haproxy-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:21 +0000 UTC Normal Pod haproxy-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 114ms (114ms including waiting). Image size: 105401394 bytes. 
kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:21 +0000 UTC Normal Pod haproxy-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:21 +0000 UTC Normal Pod haproxy-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:21 +0000 UTC Normal Pod haproxy-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:21 +0000 UTC Normal Pod haproxy-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 89ms (90ms including waiting). Image size: 105401394 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:21 +0000 UTC Normal Pod haproxy-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:21 +0000 UTC Normal Pod haproxy-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:22 +0000 UTC Warning Pod haproxy-haproxy-0.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:25 +0000 UTC Normal Pod haproxy-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-d1716a7c-5b16-427a-8e6e-c3e68a4b90a8" attachdetach-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:26 +0000 UTC Normal Pod haproxy-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-975-d7710be1" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:26 +0000 UTC Normal Pod haproxy-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-975-d7710be1" in 141ms (141ms including waiting). Image size: 108990256 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:26 +0000 UTC Normal Pod haproxy-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:26 +0000 UTC Normal Pod haproxy-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:28 +0000 UTC Normal Pod haproxy-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:28 +0000 UTC Normal Pod haproxy-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 95ms (95ms including waiting). Image size: 437180759 bytes. 
kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:28 +0000 UTC Normal Pod haproxy-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:28 +0000 UTC Normal Pod haproxy-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:28 +0000 UTC Normal Pod haproxy-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:28 +0000 UTC Normal Pod haproxy-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 109ms (109ms including waiting). Image size: 428922326 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:28 +0000 UTC Normal Pod haproxy-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:28 +0000 UTC Normal Pod haproxy-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:28 +0000 UTC Normal Pod haproxy-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:28 +0000 UTC Normal Pod haproxy-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 118ms (118ms including waiting). Image size: 132941919 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:28 +0000 UTC Normal Pod haproxy-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:29 +0000 UTC Normal Pod haproxy-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:36 +0000 UTC Normal Pod haproxy-orc-2 Binding Scheduled Successfully assigned kuttl-test-loving-racer/haproxy-orc-2 to gke-jen-ps-975-d7710be1--default-pool-9eec0249-7rq4 default-scheduler logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:36 +0000 UTC Normal StatefulSet.apps haproxy-orc SuccessfulCreate create Pod haproxy-orc-2 in StatefulSet haproxy-orc successful statefulset-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:37 +0000 UTC Normal Pod haproxy-orc-2.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-975-d7710be1" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:37 +0000 UTC Normal Pod haproxy-orc-2.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-975-d7710be1" in 124ms (124ms including waiting). Image size: 108990256 bytes. 
kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:37 +0000 UTC Normal Pod haproxy-orc-2.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:37 +0000 UTC Normal Pod haproxy-orc-2.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:38 +0000 UTC Warning Pod haproxy-haproxy-0.spec.containers{haproxy} Unhealthy Liveness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:39 +0000 UTC Normal Pod haproxy-orc-2.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:39 +0000 UTC Normal Pod haproxy-orc-2.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 123ms (123ms including waiting). Image size: 72469809 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:39 +0000 UTC Normal Pod haproxy-orc-2.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:39 +0000 UTC Normal Pod haproxy-orc-2.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:39 +0000 UTC Normal Pod haproxy-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:39 +0000 UTC Normal Pod haproxy-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 105ms (105ms including waiting). Image size: 72469809 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:39 +0000 UTC Normal Pod haproxy-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:39 +0000 UTC Normal Pod haproxy-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:47 +0000 UTC Warning Pod haproxy-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/08/25 13:52:46 Waiting for MySQL ready state 2025/08/25 13:52:46 MySQL is ready 2025/08/25 13:52:46 Peers: [3161303130393761.haproxy-mysql-unready.kuttl-test-loving-racer 6562353065333635.haproxy-mysql-unready.kuttl-test-loving-racer] 2025/08/25 13:52:46 FQDN: haproxy-mysql-1.haproxy-mysql.kuttl-test-loving-racer 2025/08/25 13:52:46 Primary: haproxy-mysql-0.haproxy-mysql.kuttl-test-loving-racer Replicas: [haproxy-mysql-1.haproxy-mysql.kuttl-test-loving-racer] 2025/08/25 13:52:46 lookup haproxy-mysql-1 [10.89.9.55] 2025/08/25 13:52:46 PodIP: 10.89.9.55 2025/08/25 13:52:46 lookup haproxy-mysql-0.haproxy-mysql.kuttl-test-loving-racer [10.89.10.90] 2025/08/25 13:52:46 PrimaryIP: 10.89.10.90 2025/08/25 13:52:47 Donor: haproxy-mysql-0.haproxy-mysql.kuttl-test-loving-racer 2025/08/25 13:52:47 Opening connection to 10.89.9.55 2025/08/25 13:52:47 Clone required: true 2025/08/25 13:52:47 Checking if a clone in progress 2025/08/25 13:52:47 Clone in progress: false 2025/08/25 13:52:47 Cloning from haproxy-mysql-0.haproxy-mysql.kuttl-test-loving-racer 2025/08/25 13:52:47 Clone finished. 
Restarting container... kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:47 +0000 UTC Normal Pod haproxy-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:51 +0000 UTC Normal Pod haproxy-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 149ms (149ms including waiting). Image size: 437180759 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:58 +0000 UTC Normal Pod haproxy-haproxy-0.spec.containers{haproxy} Killing Container haproxy failed liveness probe, will be restarted kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:52:58 +0000 UTC Warning Pod haproxy-haproxy-0.spec.containers{haproxy} Unhealthy Readiness probe errored: rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 516279f76c5555ab1c83043ebf9a21b484b0fd6cf0b2e98413b170cdc00e0a56 not found: not found kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:21 +0000 UTC Normal PersistentVolumeClaim datadir-haproxy-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:21 +0000 UTC Normal PersistentVolumeClaim datadir-haproxy-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:21 +0000 UTC Normal PersistentVolumeClaim datadir-haproxy-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-loving-racer/datadir-haproxy-mysql-2" pd.csi.storage.gke.io_gke-3493d58ee4384541b56b-375f-663e-vm_82e01f07-e480-4cf0-b4f0-184071d33332 logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:21 +0000 UTC Normal StatefulSet.apps haproxy-mysql SuccessfulCreate create Claim datadir-haproxy-mysql-2 Pod haproxy-mysql-2 in StatefulSet haproxy-mysql success statefulset-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:21 +0000 UTC Normal StatefulSet.apps haproxy-mysql SuccessfulCreate create Pod haproxy-mysql-2 in StatefulSet haproxy-mysql successful statefulset-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:25 +0000 UTC Normal PersistentVolumeClaim datadir-haproxy-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-540615e9-5d3a-434c-9a41-d3634e6b68f6 pd.csi.storage.gke.io_gke-3493d58ee4384541b56b-375f-663e-vm_82e01f07-e480-4cf0-b4f0-184071d33332 logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:25 +0000 UTC Normal Pod haproxy-mysql-2 Binding Scheduled Successfully assigned kuttl-test-loving-racer/haproxy-mysql-2 to gke-jen-ps-975-d7710be1--default-pool-9eec0249-xh68 default-scheduler logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:33 +0000 UTC Normal Pod haproxy-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-540615e9-5d3a-434c-9a41-d3634e6b68f6" attachdetach-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:37 +0000 UTC Normal Pod haproxy-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-975-d7710be1" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:37 +0000 UTC Normal Pod 
haproxy-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-975-d7710be1" in 150ms (150ms including waiting). Image size: 108990256 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:37 +0000 UTC Normal Pod haproxy-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:37 +0000 UTC Normal Pod haproxy-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:39 +0000 UTC Normal Pod haproxy-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:39 +0000 UTC Normal Pod haproxy-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 109ms (109ms including waiting). Image size: 437180759 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:39 +0000 UTC Normal Pod haproxy-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:39 +0000 UTC Normal Pod haproxy-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:39 +0000 UTC Normal Pod haproxy-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:40 +0000 UTC Normal Pod haproxy-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 143ms (143ms including waiting). Image size: 428922326 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:40 +0000 UTC Normal Pod haproxy-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:40 +0000 UTC Normal Pod haproxy-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:40 +0000 UTC Normal Pod haproxy-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:40 +0000 UTC Normal Pod haproxy-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 147ms (147ms including waiting). Image size: 132941919 bytes. 
kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:40 +0000 UTC Normal Pod haproxy-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:40 +0000 UTC Normal Pod haproxy-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:58 +0000 UTC Warning Pod haproxy-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/08/25 13:53:57 Waiting for MySQL ready state 2025/08/25 13:53:57 MySQL is ready 2025/08/25 13:53:57 Peers: [3161303130393761.haproxy-mysql-unready.kuttl-test-loving-racer 3163386138646564.haproxy-mysql-unready.kuttl-test-loving-racer 6562353065333635.haproxy-mysql-unready.kuttl-test-loving-racer] 2025/08/25 13:53:57 FQDN: haproxy-mysql-2.haproxy-mysql.kuttl-test-loving-racer 2025/08/25 13:53:57 Primary: haproxy-mysql-0.haproxy-mysql.kuttl-test-loving-racer Replicas: [haproxy-mysql-1.haproxy-mysql.kuttl-test-loving-racer haproxy-mysql-2.haproxy-mysql.kuttl-test-loving-racer] 2025/08/25 13:53:57 lookup haproxy-mysql-2 [10.89.8.55] 2025/08/25 13:53:57 PodIP: 10.89.8.55 2025/08/25 13:53:57 lookup haproxy-mysql-0.haproxy-mysql.kuttl-test-loving-racer [10.89.10.90] 2025/08/25 13:53:57 PrimaryIP: 10.89.10.90 2025/08/25 13:53:57 Donor: haproxy-mysql-1.haproxy-mysql.kuttl-test-loving-racer 2025/08/25 13:53:57 Opening connection to 10.89.8.55 2025/08/25 13:53:57 Clone required: true 2025/08/25 13:53:57 Checking if a clone in progress 2025/08/25 13:53:57 Clone in progress: false 2025/08/25 13:53:57 Cloning from haproxy-mysql-1.haproxy-mysql.kuttl-test-loving-racer 2025/08/25 13:53:58 Clone finished. Restarting container... kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:53:58 +0000 UTC Normal Pod haproxy-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:01 +0000 UTC Normal Pod haproxy-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 122ms (122ms including waiting). Image size: 437180759 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:08 +0000 UTC Normal Pod haproxy-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-loving-racer/haproxy-haproxy-1 to gke-jen-ps-975-d7710be1--default-pool-9eec0249-6cxs default-scheduler logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:08 +0000 UTC Normal StatefulSet.apps haproxy-haproxy SuccessfulCreate create Pod haproxy-haproxy-1 in StatefulSet haproxy-haproxy successful statefulset-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:09 +0000 UTC Normal Pod haproxy-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-975-d7710be1" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:09 +0000 UTC Normal Pod haproxy-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-975-d7710be1" in 117ms (117ms including waiting). Image size: 108990256 bytes. 
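
The "Startup probe failed" events on haproxy-mysql-1 and haproxy-mysql-2 above are expected on first boot rather than a fault: the probe output captures the replica bootstrap (discover peers, resolve the primary, pick a donor, run CLONE, then deliberately restart mysql so it comes up on the cloned datadir), and the kubelet's "failed startup probe, will be restarted" kill is what completes that handoff. One way to pull just that probe log back out of the event stream for a pod (an illustrative command; names are from this run):

    # List the Unhealthy events for one pod, oldest first, message only.
    kubectl -n kuttl-test-loving-racer get events \
        --field-selector involvedObject.name=haproxy-mysql-2,reason=Unhealthy \
        -o custom-columns=TIME:.lastTimestamp,MSG:.message
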
kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:09 +0000 UTC Normal Pod haproxy-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:09 +0000 UTC Normal Pod haproxy-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:11 +0000 UTC Normal Pod haproxy-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:11 +0000 UTC Normal Pod haproxy-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 101ms (101ms including waiting). Image size: 105401394 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:11 +0000 UTC Normal Pod haproxy-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:11 +0000 UTC Normal Pod haproxy-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:11 +0000 UTC Normal Pod haproxy-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:11 +0000 UTC Normal Pod haproxy-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 105ms (105ms including waiting). Image size: 105401394 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:11 +0000 UTC Normal Pod haproxy-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:11 +0000 UTC Normal Pod haproxy-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:12 +0000 UTC Warning Pod haproxy-haproxy-1.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:54 +0000 UTC Normal Pod haproxy-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-loving-racer/haproxy-haproxy-2 to gke-jen-ps-975-d7710be1--default-pool-9eec0249-xh68 default-scheduler logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:54 +0000 UTC Normal StatefulSet.apps haproxy-haproxy SuccessfulCreate create Pod haproxy-haproxy-2 in StatefulSet haproxy-haproxy successful statefulset-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:55 +0000 UTC Normal Pod haproxy-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-975-d7710be1" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:55 +0000 UTC Normal Pod haproxy-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-975-d7710be1" in 134ms (134ms including waiting). Image size: 108990256 bytes. 
kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:55 +0000 UTC Normal Pod haproxy-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:55 +0000 UTC Normal Pod haproxy-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:56 +0000 UTC Normal Pod haproxy-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:56 +0000 UTC Normal Pod haproxy-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 89ms (89ms including waiting). Image size: 105401394 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:56 +0000 UTC Normal Pod haproxy-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:56 +0000 UTC Normal Pod haproxy-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:56 +0000 UTC Normal Pod haproxy-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:57 +0000 UTC Normal Pod haproxy-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 115ms (115ms including waiting). Image size: 105401394 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:57 +0000 UTC Normal Pod haproxy-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:57 +0000 UTC Normal Pod haproxy-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:54:57 +0000 UTC Warning Pod haproxy-haproxy-2.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:55:41 +0000 UTC Warning PerconaServerMySQL.ps.percona.com haproxy ClusterStateChanged Initializing -> Ready ps-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:09 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:09 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:09 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:13 +0000 UTC Warning Pod haproxy-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/08/25 13:56:13 MySQL state is not ready... kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:13 +0000 UTC Warning PerconaServerMySQL.ps.percona.com haproxy ClusterStateChanged Ready -> Initializing ps-controller logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:18 +0000 UTC Warning Pod haproxy-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/08/25 13:56:18 MySQL state is not ready... 
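
The ERROR 2013 readiness and liveness failures on the haproxy containers, like the "MySQL state is not ready..." failures during the 13:56 restart of haproxy-mysql-0, are transient: the probe's MySQL handshake goes through HAProxy, which has no writable primary to offer until the cluster finishes initializing (or while the primary is being restarted), so the cluster state bouncing Ready -> Initializing around them is consistent. Such failures only matter if they persist after ClusterStateChanged settles back to Ready; a rough way to check for stragglers:

    # Show the most recent probe failures across the test namespace.
    kubectl -n kuttl-test-loving-racer get events --field-selector reason=Unhealthy \
        --sort-by=.lastTimestamp | tail -n 20
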
kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:23 +0000 UTC Warning Pod haproxy-mysql-0.spec.containers{mysql} Unhealthy Readiness probe errored: rpc error: code = Unknown desc = failed to exec in container: container is in CONTAINER_EXITED state kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:29 +0000 UTC Normal Pod haproxy-mysql-0 Binding Scheduled Successfully assigned kuttl-test-loving-racer/haproxy-mysql-0 to gke-jen-ps-975-d7710be1--default-pool-9eec0249-7rq4 default-scheduler logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:30 +0000 UTC Normal Pod haproxy-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-975-d7710be1" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:30 +0000 UTC Normal Pod haproxy-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-975-d7710be1" in 122ms (122ms including waiting). Image size: 108990256 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:30 +0000 UTC Normal Pod haproxy-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:31 +0000 UTC Normal Pod haproxy-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:32 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:32 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 88ms (88ms including waiting). Image size: 437180759 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:32 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:32 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:32 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:32 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 114ms (114ms including waiting). Image size: 428922326 bytes. kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:32 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:32 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:32 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:33 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 115ms (115ms including waiting). Image size: 132941919 bytes. 
kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:33 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:33 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:44 +0000 UTC Normal Pod haproxy-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:44 +0000 UTC Normal Pod haproxy-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:44 +0000 UTC Normal Pod haproxy-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:44 +0000 UTC Warning PodDisruptionBudget.policy haproxy-haproxy CalculateExpectedPodCountFailed Failed to calculate the number of expected pods: found no controllers for pod "haproxy-haproxy-0" controllermanager logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:45 +0000 UTC Normal Pod haproxy-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:51 +0000 UTC Warning Pod haproxy-mysql-0.spec.containers{mysql} Unhealthy Startup probe failed: 2025/08/25 13:56:50 Waiting for MySQL ready state 2025/08/25 13:56:50 MySQL is ready 2025/08/25 13:56:50 Peers: [3161303130393761.haproxy-mysql-unready.kuttl-test-loving-racer 3163386138646564.haproxy-mysql-unready.kuttl-test-loving-racer 6561363638623565.haproxy-mysql-unready.kuttl-test-loving-racer] 2025/08/25 13:56:50 FQDN: haproxy-mysql-0.haproxy-mysql.kuttl-test-loving-racer 2025/08/25 13:56:50 Primary: haproxy-mysql-2.haproxy-mysql.kuttl-test-loving-racer Replicas: [haproxy-mysql-0.haproxy-mysql.kuttl-test-loving-racer haproxy-mysql-1.haproxy-mysql.kuttl-test-loving-racer] 2025/08/25 13:56:50 lookup haproxy-mysql-0 [10.89.10.93] 2025/08/25 13:56:50 PodIP: 10.89.10.93 2025/08/25 13:56:50 lookup haproxy-mysql-2.haproxy-mysql.kuttl-test-loving-racer [10.89.8.55] 2025/08/25 13:56:50 PrimaryIP: 10.89.8.55 2025/08/25 13:56:50 Donor: haproxy-mysql-1.haproxy-mysql.kuttl-test-loving-racer 2025/08/25 13:56:50 Opening connection to 10.89.10.93 2025/08/25 13:56:50 Clone required: true 2025/08/25 13:56:50 Checking if a clone in progress 2025/08/25 13:56:50 Clone in progress: false 2025/08/25 13:56:50 Cloning from haproxy-mysql-1.haproxy-mysql.kuttl-test-loving-racer 2025/08/25 13:56:51 Clone finished. Restarting container... kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:51 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:56:55 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 116ms (116ms including waiting). Image size: 437180759 bytes. 
kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:57:32 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:57:32 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:57:32 +0000 UTC Normal Pod haproxy-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:57:32 +0000 UTC Normal Pod haproxy-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:57:32 +0000 UTC Normal Pod haproxy-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:57:32 +0000 UTC Normal Pod haproxy-mysql-1.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:57:32 +0000 UTC Normal Pod haproxy-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:57:32 +0000 UTC Normal Pod haproxy-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:57:32 +0000 UTC Normal Pod haproxy-mysql-2.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:57:32 +0000 UTC Normal Pod haproxy-orc-0.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:57:32 +0000 UTC Normal Pod haproxy-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:57:32 +0000 UTC Normal Pod haproxy-orc-1.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:57:32 +0000 UTC Normal Pod haproxy-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:57:32 +0000 UTC Normal Pod haproxy-orc-2.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet logger.go:42: 13:57:41 | haproxy | 2025-08-25 13:57:32 +0000 UTC Normal Pod haproxy-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 13:57:41 | haproxy | Deleting namespace: kuttl-test-loving-racer === NAME kuttl harness.go:403: run tests finished harness.go:510: cleaning up harness.go:567: removing temp folder: "" --- PASS: kuttl (422.11s) --- PASS: kuttl/harness (0.00s) --- PASS: kuttl/harness/haproxy (421.38s) PASS
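
The harness reports a clean pass: kuttl/harness/haproxy completed in 421.38s of the 422.11s total. To reproduce just this test against an existing cluster, kuttl can be pointed at the suite directory with its single-test filter (the path assumes the repo layout shown in the trace; adjust --timeout to taste):

    kubectl kuttl test e2e-tests/tests --test haproxy --timeout 180
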