=== RUN kuttl
harness.go:464: starting setup
harness.go:255: running tests using configured kubeconfig.
harness.go:278: Successful connection to cluster at: https://34.46.120.156
harness.go:363: running tests
harness.go:75: going to run test suite with timeout of 180 seconds for each step
harness.go:375: testsuite: e2e-tests/tests has 34 tests
=== RUN kuttl/harness
=== RUN kuttl/harness/init-deploy
=== PAUSE kuttl/harness/init-deploy
=== CONT kuttl/harness/init-deploy
logger.go:42: 12:50:05 | init-deploy | Creating namespace: kuttl-test-still-hare
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | starting test step 0-deploy-operator
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
init_temp_dir # do this only in the first TestStep
deploy_operator
deploy_non_tls_cluster_secrets
deploy_tls_cluster_secrets
deploy_client]
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | + source ../../functions
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ realpath ../../..
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-929
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | ++++ pwd
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-929/e2e-tests/tests/init-deploy
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | ++ test_name=init-deploy
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-929/e2e-tests/vars.sh
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-929
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-929
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-929/deploy
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-929/deploy
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-929/e2e-tests
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-929/e2e-tests
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-929/e2e-tests/conf
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-929/e2e-tests/conf
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/init-deploy
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/init-deploy
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ export GIT_BRANCH=PR-929
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ GIT_BRANCH=PR-929
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ export VERSION=PR-929-2cac30ca
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ VERSION=PR-929-2cac30ca
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-929-2cac30ca
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-929-2cac30ca
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ export MINIO_VER=5.4.0
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ MINIO_VER=5.4.0
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | ++++ which gdate
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-929/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | ++++ which date
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ date=/usr/bin/date
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ oc get projects
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ :
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ kubectl get nodes
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | +++ grep '^minikube'
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | ++ oc get projects
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | + init_temp_dir
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | + rm -rf /tmp/kuttl/ps/init-deploy
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | + mkdir -p /tmp/kuttl/ps/init-deploy
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | + deploy_operator
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | + destroy_operator
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
logger.go:42: 12:50:05 | init-deploy/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 12:50:06 | init-deploy/0-deploy-operator | Error from server (NotFound): deployments.apps "percona-server-mysql-operator" not found
logger.go:42: 12:50:06 | init-deploy/0-deploy-operator | + true
logger.go:42: 12:50:06 | init-deploy/0-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 12:50:06 | init-deploy/0-deploy-operator | + kubectl delete namespace ps-operator --force --grace-period=0
logger.go:42: 12:50:06 | init-deploy/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 12:50:06 | init-deploy/0-deploy-operator | Error from server (NotFound): namespaces "ps-operator" not found
logger.go:42: 12:50:06 | init-deploy/0-deploy-operator | + true
logger.go:42: 12:50:06 | init-deploy/0-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 12:50:06 | init-deploy/0-deploy-operator | + create_namespace ps-operator
logger.go:42: 12:50:06 | init-deploy/0-deploy-operator | + local namespace=ps-operator
logger.go:42: 12:50:06 | init-deploy/0-deploy-operator | + [[ -n '' ]]
logger.go:42: 12:50:06 | init-deploy/0-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found
logger.go:42: 12:50:07 | init-deploy/0-deploy-operator | + kubectl wait --for=delete namespace ps-operator
logger.go:42: 12:50:07 | init-deploy/0-deploy-operator | + kubectl create namespace ps-operator
logger.go:42: 12:50:07 | init-deploy/0-deploy-operator | namespace/ps-operator created
logger.go:42: 12:50:07 | init-deploy/0-deploy-operator | + kubectl -n ps-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-929/deploy/crd.yaml
logger.go:42: 12:50:09 | init-deploy/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
logger.go:42: 12:50:09 | init-deploy/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
logger.go:42: 12:50:10 | init-deploy/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
logger.go:42: 12:50:10 | init-deploy/0-deploy-operator | + '[' -n ps-operator ']'
logger.go:42: 12:50:10 | init-deploy/0-deploy-operator | + kubectl -n ps-operator apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-929/deploy/cw-rbac.yaml
logger.go:42: 12:50:11 | init-deploy/0-deploy-operator | serviceaccount/percona-server-mysql-operator created
logger.go:42: 12:50:11 | init-deploy/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
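The operator deployment above follows a fixed sequence: best-effort teardown, namespace recreation, server-side apply of the CRDs, then RBAC. Server-side apply is used here most likely because the perconaservermysqls CRD is too large for the last-applied-configuration annotation that client-side apply writes. A condensed sketch of the sequence, reconstructed from the trace (the function body is an assumption; only the kubectl invocations appear verbatim above):

    deploy_operator() {
        destroy_operator    # delete any old deployment and namespace; NotFound errors are tolerated
        create_namespace ps-operator
        # --server-side avoids the client-side annotation size limit on the large CRD bundle;
        # --force-conflicts takes ownership of fields applied by a previous field manager
        kubectl -n ps-operator apply --server-side --force-conflicts -f "${DEPLOY_DIR}/crd.yaml"
        kubectl -n ps-operator apply -f "${DEPLOY_DIR}/cw-rbac.yaml"
    }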
logger.go:42: 12:50:12 | init-deploy/0-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 12:50:12 | init-deploy/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 12:50:12 | init-deploy/0-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 12:50:12 | init-deploy/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"'
logger.go:42: 12:50:12 | init-deploy/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"'
logger.go:42: 12:50:12 | init-deploy/0-deploy-operator | + kubectl -n ps-operator apply -f -
logger.go:42: 12:50:12 | init-deploy/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-929-2cac30ca
logger.go:42: 12:50:12 | init-deploy/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-929-2cac30ca"' /mnt/jenkins/workspace/cloud-ps-operator_PR-929/deploy/cw-operator.yaml
logger.go:42: 12:50:13 | init-deploy/0-deploy-operator | configmap/percona-server-mysql-operator-config created
logger.go:42: 12:50:13 | init-deploy/0-deploy-operator | deployment.apps/percona-server-mysql-operator created
logger.go:42: 12:50:13 | init-deploy/0-deploy-operator | + deploy_non_tls_cluster_secrets
logger.go:42: 12:50:13 | init-deploy/0-deploy-operator | + kubectl -n kuttl-test-still-hare apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-929/e2e-tests/conf/secrets.yaml
logger.go:42: 12:50:14 | init-deploy/0-deploy-operator | secret/test-secrets created
logger.go:42: 12:50:14 | init-deploy/0-deploy-operator | + deploy_tls_cluster_secrets
logger.go:42: 12:50:14 | init-deploy/0-deploy-operator | + kubectl -n kuttl-test-still-hare apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-929/e2e-tests/conf/ssl-secret.yaml
logger.go:42: 12:50:15 | init-deploy/0-deploy-operator | secret/test-ssl created
logger.go:42: 12:50:15 | init-deploy/0-deploy-operator | + deploy_client
logger.go:42: 12:50:15 | init-deploy/0-deploy-operator | + kubectl -n kuttl-test-still-hare apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-929/e2e-tests/conf/client.yaml
logger.go:42: 12:50:16 | init-deploy/0-deploy-operator | pod/mysql-client created
logger.go:42: 12:50:17 | init-deploy/0-deploy-operator | ResourceQuota:kuttl-test-still-hare/init-deploy-resource-quota created
logger.go:42: 12:50:17 | init-deploy/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 12:50:17 | init-deploy/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 12:50:18 | init-deploy/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 12:50:19 | init-deploy/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 12:50:19 | init-deploy/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 12:50:19 | init-deploy/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 12:50:21 | init-deploy/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 12:50:21 | init-deploy/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 12:50:21 | init-deploy/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 12:50:23 | init-deploy/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 12:50:23 | init-deploy/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 12:50:24 | init-deploy/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 12:50:25 | init-deploy/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 12:50:25 | init-deploy/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 12:50:25 | init-deploy/0-deploy-operator | INFO Found 1 resource(s).
logger.go:42: 12:50:25 | init-deploy/0-deploy-operator | NAME                           NAMESPACE    COL0
logger.go:42: 12:50:25 | init-deploy/0-deploy-operator | percona-server-mysql-operator ps-operator  1
logger.go:42: 12:50:25 | init-deploy/0-deploy-operator | ASSERT PASS
logger.go:42: 12:50:25 | init-deploy/0-deploy-operator | test step completed 0-deploy-operator
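Step 0 gates on operator readiness by polling kubectl assert exist-enhanced (from the kubectl-assert krew plugin) every couple of seconds; the repeated ASSERT FAIL entries show kuttl re-running the command until it exits 0 or the 180 s per-step timeout expires. The same gate can be expressed without the plugin (a sketch; deployment and namespace names are taken from this run, the timeout from the suite settings):

    kubectl -n ps-operator wait deployment/percona-server-mysql-operator \
        --for=condition=Available --timeout=180s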
logger.go:42: 12:50:25 | init-deploy/1-create-cluster | starting test step 1-create-cluster
logger.go:42: 12:50:25 | init-deploy/1-create-cluster | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
get_cr \
  | yq eval '.spec.mysql.clusterType="async"' - \
  | yq eval '.spec.mysql.size=3' - \
  | yq eval '.spec.proxy.haproxy.enabled=true' - \
  | yq eval '.spec.proxy.haproxy.size=3' - \
  | yq eval '.spec.orchestrator.enabled=true' - \
  | yq eval '.spec.mysql.resources.limits.cpu="500m"' - \
  | yq eval '.spec.mysql.resources.limits.memory="1G"' - \
  | yq eval '.spec.mysql.resources.requests.cpu="400m"' - \
  | yq eval '.spec.mysql.resources.requests.memory="1G"' - \
  | yq eval '.spec.backup.resources.limits.cpu="200m"' - \
  | yq eval '.spec.backup.resources.limits.memory="256Mi"' - \
  | yq eval '.spec.backup.resources.requests.cpu="100m"' - \
  | yq eval '.spec.backup.resources.requests.memory="128Mi"' - \
  | yq eval '.spec.toolkit.resources.limits.cpu="150m"' - \
  | yq eval '.spec.toolkit.resources.limits.memory="256Mi"' - \
  | yq eval '.spec.toolkit.resources.requests.cpu="100m"' - \
  | yq eval '.spec.toolkit.resources.requests.memory="128Mi"' - \
  | yq eval '.spec.proxy.haproxy.resources.limits.cpu="600m"' - \
  | yq eval '.spec.proxy.haproxy.resources.limits.memory="512Mi"' - \
  | yq eval '.spec.proxy.haproxy.resources.requests.cpu="500m"' - \
  | yq eval '.spec.proxy.haproxy.resources.requests.memory="256Mi"' - \
  | yq eval '.spec.orchestrator.resources.limits.cpu="200m"' - \
  | yq eval '.spec.orchestrator.resources.limits.memory="256Mi"' - \
  | yq eval '.spec.orchestrator.resources.requests.cpu="100m"' - \
  | yq eval '.spec.orchestrator.resources.requests.memory="128Mi"' - \
  | yq eval '.spec.orchestrator.size=3' - \
  | kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 12:50:25 | init-deploy/1-create-cluster | + source ../../functions
logger.go:42: 12:50:25 | init-deploy/1-create-cluster | [vars.sh environment trace omitted; identical to step 0-deploy-operator above]
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + get_cr
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + local name_suffix=
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval .spec.mysql.size=3 -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.mysql.resources.requests.memory="1G"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.orchestrator.resources.limits.cpu="200m"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.mysql.resources.requests.cpu="400m"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.backup.resources.limits.cpu="200m"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.backup.resources.limits.memory="256Mi"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.toolkit.resources.requests.cpu="100m"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.mysql.resources.limits.memory="1G"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval .spec.orchestrator.enabled=true -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval .spec.proxy.haproxy.size=3 -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.mysql.resources.limits.cpu="500m"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.backup.resources.requests.cpu="100m"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.backup.resources.requests.memory="128Mi"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.toolkit.resources.limits.cpu="150m"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.orchestrator.resources.requests.memory="128Mi"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.toolkit.resources.limits.memory="256Mi"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.proxy.haproxy.resources.limits.cpu="600m"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.toolkit.resources.requests.memory="128Mi"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval .spec.orchestrator.size=3 -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval .spec.proxy.haproxy.enabled=true -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.proxy.haproxy.resources.limits.memory="512Mi"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.proxy.haproxy.resources.requests.cpu="500m"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.proxy.haproxy.resources.requests.memory="256Mi"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.orchestrator.resources.limits.memory="256Mi"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.orchestrator.resources.requests.cpu="100m"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + '[' -n '' ']'
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + kubectl -n kuttl-test-still-hare apply -f -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.secretsName="test-secrets"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval .spec.orchestrator.enabled=true -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-929-2cac30ca
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-929-2cac30ca"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | ++ printf '.metadata.name="%s"' init-deploy
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.metadata.name="init-deploy"' /mnt/jenkins/workspace/cloud-ps-operator_PR-929/deploy/cr.yaml
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:3-dev-latest
logger.go:42: 12:50:26 | init-deploy/1-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:3-dev-latest"' -
logger.go:42: 12:50:27 | init-deploy/1-create-cluster | perconaservermysql.ps.percona.com/init-deploy created
logger.go:42: 12:56:55 | init-deploy/1-create-cluster | test step completed 1-create-cluster
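The interleaved '+ yq eval' lines above are the xtrace of the single pipeline from the step command: get_cr emits deploy/cr.yaml with the test name, secret names, and image tags patched in, and each yq stage rewrites one field before kubectl applies the stream. Every stage follows the same shape (values taken from this step; the trailing '-' tells yq to read stdin):

    get_cr \
      | yq eval '.spec.mysql.clusterType="async"' - \
      | yq eval '.spec.mysql.size=3' - \
      | kubectl -n "${NAMESPACE}" apply -f -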
logger.go:42: 12:56:55 | init-deploy/2-write-data | starting test step 2-write-data
logger.go:42: 12:56:55 | init-deploy/2-write-data | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
run_mysql \
  "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" \
  "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password"
run_mysql \
  "INSERT myDB.myTable (id) VALUES (100500)" \
  "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password"]
logger.go:42: 12:56:55 | init-deploy/2-write-data | + source ../../functions
logger.go:42: 12:56:55 | init-deploy/2-write-data | [vars.sh environment trace omitted; identical to step 0-deploy-operator above]
logger.go:42: 12:56:55 | init-deploy/2-write-data | +++ get_cluster_name
logger.go:42: 12:56:55 | init-deploy/2-write-data | +++ kubectl -n kuttl-test-still-hare get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 12:56:56 | init-deploy/2-write-data | ++ get_haproxy_svc init-deploy
logger.go:42: 12:56:56 | init-deploy/2-write-data | ++ local cluster=init-deploy
logger.go:42: 12:56:56 | init-deploy/2-write-data | ++ echo init-deploy-haproxy
logger.go:42: 12:56:56 | init-deploy/2-write-data | + run_mysql 'CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' '-h init-deploy-haproxy -uroot -proot_password'
logger.go:42: 12:56:56 | init-deploy/2-write-data | + local 'command=CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)'
logger.go:42: 12:56:56 | init-deploy/2-write-data | + local 'uri=-h init-deploy-haproxy -uroot -proot_password'
logger.go:42: 12:56:56 | init-deploy/2-write-data | + local pod=
logger.go:42: 12:56:56 | init-deploy/2-write-data | ++ get_client_pod
logger.go:42: 12:56:56 | init-deploy/2-write-data | ++ kubectl -n kuttl-test-still-hare get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 12:56:56 | init-deploy/2-write-data | + client_pod=mysql-client
logger.go:42: 12:56:56 | init-deploy/2-write-data | + wait_pod mysql-client
logger.go:42: 12:56:56 | init-deploy/2-write-data | + local pod=mysql-client
logger.go:42: 12:56:56 | init-deploy/2-write-data | + set +o xtrace
logger.go:42: 12:56:56 | init-deploy/2-write-data | mysql-clienttrue
logger.go:42: 12:56:56 | init-deploy/2-write-data | + kubectl -n kuttl-test-still-hare exec mysql-client -- bash -c 'printf '\''%s\n'\'' "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" | mysql -sN -h init-deploy-haproxy -uroot -proot_password'
logger.go:42: 12:56:56 | init-deploy/2-write-data | + sed -e 's/mysql: //'
logger.go:42: 12:56:56 | init-deploy/2-write-data | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 12:56:57 | init-deploy/2-write-data | + :
logger.go:42: 12:56:57 | init-deploy/2-write-data | +++ get_cluster_name
logger.go:42: 12:56:57 | init-deploy/2-write-data | +++ kubectl -n kuttl-test-still-hare get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 12:56:58 | init-deploy/2-write-data | ++ get_haproxy_svc init-deploy
logger.go:42: 12:56:58 | init-deploy/2-write-data | ++ local cluster=init-deploy
logger.go:42: 12:56:58 | init-deploy/2-write-data | ++ echo init-deploy-haproxy
logger.go:42: 12:56:58 | init-deploy/2-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100500)' '-h init-deploy-haproxy -uroot -proot_password'
logger.go:42: 12:56:58 | init-deploy/2-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100500)'
logger.go:42: 12:56:58 | init-deploy/2-write-data | + local 'uri=-h init-deploy-haproxy -uroot -proot_password'
logger.go:42: 12:56:58 | init-deploy/2-write-data | + local pod=
logger.go:42: 12:56:58 | init-deploy/2-write-data | ++ get_client_pod
logger.go:42: 12:56:58 | init-deploy/2-write-data | ++ kubectl -n kuttl-test-still-hare get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 12:56:58 | init-deploy/2-write-data | + client_pod=mysql-client
logger.go:42: 12:56:58 | init-deploy/2-write-data | + wait_pod mysql-client
logger.go:42: 12:56:58 | init-deploy/2-write-data | + local pod=mysql-client
logger.go:42: 12:56:58 | init-deploy/2-write-data | + set +o xtrace
logger.go:42: 12:56:59 | init-deploy/2-write-data | mysql-clienttrue
logger.go:42: 12:56:59 | init-deploy/2-write-data | + kubectl -n kuttl-test-still-hare exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100500)" | mysql -sN -h init-deploy-haproxy -uroot -proot_password'
logger.go:42: 12:56:59 | init-deploy/2-write-data | + sed -e 's/mysql: //'
logger.go:42: 12:56:59 | init-deploy/2-write-data | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 12:57:00 | init-deploy/2-write-data | + :
[controller-runtime] log.SetLogger(...) was never called; logs will not be displayed.
Detected at:
	> goroutine 36 [running]:
	> runtime/debug.Stack()
	> 	/nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/runtime/debug/stack.go:24 +0x5e
	> sigs.k8s.io/controller-runtime/pkg/log.eventuallyFulfillRoot()
	> 	/home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/log.go:60 +0xcd
	> sigs.k8s.io/controller-runtime/pkg/log.(*delegatingLogSink).WithName(0xc0002a9c00, {0x184a055, 0x14})
	> 	/home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/deleg.go:147 +0x3e
	> github.com/go-logr/logr.Logger.WithName({{0x1acb7d8, 0xc0002a9c00}, 0x0}, {0x184a055?, 0xc0005cbf80?})
	> 	/home/mowsiany/go/pkg/mod/github.com/go-logr/logr@v1.2.4/logr.go:336 +0x36
	> sigs.k8s.io/controller-runtime/pkg/client.newClient(0x131ead3?, {0x0, 0xc00043dea0, {0x1accd90, 0xc0002a8380}, 0x0, {0x0, 0x0}, 0x0})
	> 	/home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:122 +0xf1
	> sigs.k8s.io/controller-runtime/pkg/client.New(0xc0002bcb48?, {0x0, 0xc00043dea0, {0x1accd90, 0xc0002a8380}, 0x0, {0x0, 0x0}, 0x0})
	> 	/home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:103 +0x7d
	> github.com/kudobuilder/kuttl/pkg/test/utils.NewRetryClient(0xc0002bcb48, {0x0, 0xc00043dea0, {0x1accd90, 0xc0002a8380}, 0x0, {0x0, 0x0}, 0x0})
	> 	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/utils/kubernetes.go:177 +0x127
	> github.com/kudobuilder/kuttl/pkg/test.(*Harness).Client(0xc0001f8c08, 0x76?)
	> 	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:323 +0x18e
	> github.com/kudobuilder/kuttl/pkg/test.(*Step).Create(0xc0002d3860, 0xc00035a340, {0xc0005aa390, 0x15})
	> 	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:177 +0x63
	> github.com/kudobuilder/kuttl/pkg/test.(*Step).Run(0xc0002d3860, 0xc00035a340, {0xc0005aa390, 0x15})
	> 	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:457 +0x24a
	> github.com/kudobuilder/kuttl/pkg/test.(*Case).Run(0xc0000a8f00, 0xc00035a340, 0xc00011c240)
	> 	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/case.go:373 +0xaeb
	> github.com/kudobuilder/kuttl/pkg/test.(*Harness).RunTests.func1.1(0xc00035a340)
	> 	/home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:401 +0x12e
	> testing.tRunner(0xc00035a340, 0xc000348930)
	> 	/nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1689 +0xfb
	> created by testing.(*T).Run in goroutine 35
	> 	/nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1742 +0x390
logger.go:42: 12:57:00 | init-deploy/2-write-data | test step completed 2-write-data
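run_mysql itself is not printed, but its shape is fully visible in the trace: resolve the mysql-client pod, wait for it, pipe the statement into mysql inside the pod, and strip the password warning. A plausible reconstruction under those assumptions (the function body is inferred, not taken from e2e-tests/functions):

    run_mysql() {
        local command="$1"   # SQL to execute
        local uri="$2"       # e.g. "-h init-deploy-haproxy -uroot -proot_password"
        local client_pod
        client_pod=$(kubectl -n "${NAMESPACE}" get pods \
            --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}')
        wait_pod "${client_pod}"
        kubectl -n "${NAMESPACE}" exec "${client_pod}" -- \
            bash -c "printf '%s\n' \"${command}\" | mysql -sN ${uri}" \
            | sed -e 's/mysql: //' \
            | grep -v 'Using a password on the command line interface can be insecure.' \
            || :   # tolerate empty result sets under errexit; the lone '+ :' in the trace suggests this
    }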
logger.go:42: 12:57:00 | init-deploy/3-read-from-primary | starting test step 3-read-from-primary
logger.go:42: 12:57:00 | init-deploy/3-read-from-primary | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
data=$(run_mysql "SELECT * FROM myDB.myTable" "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password")
kubectl create configmap -n "${NAMESPACE}" 03-read-from-primary --from-literal=data="${data}"]
logger.go:42: 12:57:00 | init-deploy/3-read-from-primary | + source ../../functions
logger.go:42: 12:57:00 | init-deploy/3-read-from-primary | [vars.sh environment trace omitted; identical to step 0-deploy-operator above]
logger.go:42: 12:57:00 | init-deploy/3-read-from-primary | ++++ get_cluster_name
logger.go:42: 12:57:00 | init-deploy/3-read-from-primary | ++++ kubectl -n kuttl-test-still-hare get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 12:57:01 | init-deploy/3-read-from-primary | +++ get_haproxy_svc init-deploy
logger.go:42: 12:57:01 | init-deploy/3-read-from-primary | +++ local cluster=init-deploy
logger.go:42: 12:57:01 | init-deploy/3-read-from-primary | +++ echo init-deploy-haproxy
logger.go:42: 12:57:01 | init-deploy/3-read-from-primary | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h init-deploy-haproxy -uroot -proot_password'
logger.go:42: 12:57:01 | init-deploy/3-read-from-primary | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 12:57:01 | init-deploy/3-read-from-primary | ++ local 'uri=-h init-deploy-haproxy -uroot -proot_password'
logger.go:42: 12:57:01 | init-deploy/3-read-from-primary | ++ local pod=
logger.go:42: 12:57:01 | init-deploy/3-read-from-primary | +++ get_client_pod
logger.go:42: 12:57:01 | init-deploy/3-read-from-primary | +++ kubectl -n kuttl-test-still-hare get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 12:57:01 | init-deploy/3-read-from-primary | ++ client_pod=mysql-client
logger.go:42: 12:57:01 | init-deploy/3-read-from-primary | ++ wait_pod mysql-client
logger.go:42: 12:57:01 | init-deploy/3-read-from-primary | ++ local pod=mysql-client
logger.go:42: 12:57:01 | init-deploy/3-read-from-primary | ++ set +o xtrace
logger.go:42: 12:57:01 | init-deploy/3-read-from-primary | mysql-clienttrue
logger.go:42: 12:57:01 | init-deploy/3-read-from-primary | ++ kubectl -n kuttl-test-still-hare exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h init-deploy-haproxy -uroot -proot_password'
logger.go:42: 12:57:01 | init-deploy/3-read-from-primary | ++ sed -e 's/mysql: //'
logger.go:42: 12:57:01 | init-deploy/3-read-from-primary | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 12:57:02 | init-deploy/3-read-from-primary | + data=100500
logger.go:42: 12:57:02 | init-deploy/3-read-from-primary | + kubectl create configmap -n kuttl-test-still-hare 03-read-from-primary --from-literal=data=100500
logger.go:42: 12:57:03 | init-deploy/3-read-from-primary | configmap/03-read-from-primary created
logger.go:42: 12:57:03 | init-deploy/3-read-from-primary | test step completed 3-read-from-primary
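Steps 3 and 4 turn query results into ConfigMaps because kuttl's assert files match Kubernetes objects rather than command output: the step can then simply declare the expected ConfigMap data. A minimal standalone equivalent of the step-3 check (the expected value 100500 is the row written in step 2):

    data=$(run_mysql "SELECT * FROM myDB.myTable" \
        "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password")
    [ "${data}" = "100500" ] || { echo "unexpected data on primary: ${data}" >&2; exit 1; }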
logger.go:42: 12:57:03 | init-deploy/4-read-from-replicas | starting test step 4-read-from-replicas
logger.go:42: 12:57:03 | init-deploy/4-read-from-replicas | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
args=''
size=$(kubectl -n ${NAMESPACE} get ps $(get_cluster_name) -o jsonpath='{.spec.mysql.size}')
for i in $(seq 0 $((size - 1))); do
  host=$(get_mysql_headless_fqdn $(get_cluster_name) $i)
  data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${host} -uroot -proot_password")
  args="${args} --from-literal=${host}=${data}"
done
kubectl create configmap -n "${NAMESPACE}" 04-read-from-replicas ${args}]
logger.go:42: 12:57:03 | init-deploy/4-read-from-replicas | + source ../../functions
logger.go:42: 12:57:03 | init-deploy/4-read-from-replicas | [vars.sh environment trace omitted; identical to step 0-deploy-operator above]
logger.go:42: 12:57:05 | init-deploy/4-read-from-replicas | ++ get_mysql_headless_fqdn init-deploy 0 logger.go:42: 12:57:05 | init-deploy/4-read-from-replicas | ++ local cluster=init-deploy logger.go:42: 12:57:05 | init-deploy/4-read-from-replicas | ++ local index=0 logger.go:42: 12:57:05 | init-deploy/4-read-from-replicas | ++ echo init-deploy-mysql-0.init-deploy-mysql logger.go:42: 12:57:05 | init-deploy/4-read-from-replicas | + host=init-deploy-mysql-0.init-deploy-mysql logger.go:42: 12:57:05 | init-deploy/4-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h init-deploy-mysql-0.init-deploy-mysql -uroot -proot_password' logger.go:42: 12:57:05 | init-deploy/4-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 12:57:05 | init-deploy/4-read-from-replicas | ++ local 'uri=-h init-deploy-mysql-0.init-deploy-mysql -uroot -proot_password' logger.go:42: 12:57:05 | init-deploy/4-read-from-replicas | ++ local pod= logger.go:42: 12:57:05 | init-deploy/4-read-from-replicas | +++ get_client_pod logger.go:42: 12:57:05 | init-deploy/4-read-from-replicas | +++ kubectl -n kuttl-test-still-hare get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 12:57:05 | init-deploy/4-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 12:57:05 | init-deploy/4-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 12:57:05 | init-deploy/4-read-from-replicas | ++ local pod=mysql-client logger.go:42: 12:57:05 | init-deploy/4-read-from-replicas | ++ set +o xtrace logger.go:42: 12:57:06 | init-deploy/4-read-from-replicas | mysql-clienttrue logger.go:42: 12:57:06 | init-deploy/4-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 12:57:06 | init-deploy/4-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 12:57:06 | init-deploy/4-read-from-replicas | ++ kubectl -n kuttl-test-still-hare exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h init-deploy-mysql-0.init-deploy-mysql -uroot -proot_password' logger.go:42: 12:57:06 | init-deploy/4-read-from-replicas | + data=100500 logger.go:42: 12:57:06 | init-deploy/4-read-from-replicas | + args=' --from-literal=init-deploy-mysql-0.init-deploy-mysql=100500' logger.go:42: 12:57:06 | init-deploy/4-read-from-replicas | + for i in '$(seq 0 $((size - 1)))' logger.go:42: 12:57:06 | init-deploy/4-read-from-replicas | +++ get_cluster_name logger.go:42: 12:57:06 | init-deploy/4-read-from-replicas | +++ kubectl -n kuttl-test-still-hare get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 12:57:07 | init-deploy/4-read-from-replicas | ++ get_mysql_headless_fqdn init-deploy 1 logger.go:42: 12:57:07 | init-deploy/4-read-from-replicas | ++ local cluster=init-deploy logger.go:42: 12:57:07 | init-deploy/4-read-from-replicas | ++ local index=1 logger.go:42: 12:57:07 | init-deploy/4-read-from-replicas | ++ echo init-deploy-mysql-1.init-deploy-mysql logger.go:42: 12:57:07 | init-deploy/4-read-from-replicas | + host=init-deploy-mysql-1.init-deploy-mysql logger.go:42: 12:57:07 | init-deploy/4-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h init-deploy-mysql-1.init-deploy-mysql -uroot -proot_password' logger.go:42: 12:57:07 | init-deploy/4-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 12:57:07 | init-deploy/4-read-from-replicas | ++ local 'uri=-h init-deploy-mysql-1.init-deploy-mysql -uroot -proot_password' logger.go:42: 12:57:07 | init-deploy/4-read-from-replicas | ++ local pod= logger.go:42: 12:57:07 | init-deploy/4-read-from-replicas | +++ get_client_pod logger.go:42: 12:57:07 | init-deploy/4-read-from-replicas | +++ kubectl -n kuttl-test-still-hare get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 12:57:07 | init-deploy/4-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 12:57:07 | init-deploy/4-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 12:57:07 | init-deploy/4-read-from-replicas | ++ local pod=mysql-client logger.go:42: 12:57:07 | init-deploy/4-read-from-replicas | ++ set +o xtrace logger.go:42: 12:57:08 | init-deploy/4-read-from-replicas | mysql-clienttrue logger.go:42: 12:57:08 | init-deploy/4-read-from-replicas | ++ kubectl -n kuttl-test-still-hare exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h init-deploy-mysql-1.init-deploy-mysql -uroot -proot_password' logger.go:42: 12:57:08 | init-deploy/4-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 12:57:08 | init-deploy/4-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 12:57:09 | init-deploy/4-read-from-replicas | + data=100500 logger.go:42: 12:57:09 | init-deploy/4-read-from-replicas | + args=' --from-literal=init-deploy-mysql-0.init-deploy-mysql=100500 --from-literal=init-deploy-mysql-1.init-deploy-mysql=100500' logger.go:42: 12:57:09 | init-deploy/4-read-from-replicas | + for i in '$(seq 0 $((size - 1)))' logger.go:42: 12:57:09 | init-deploy/4-read-from-replicas | +++ get_cluster_name logger.go:42: 12:57:09 | init-deploy/4-read-from-replicas | +++ kubectl -n kuttl-test-still-hare get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 12:57:09 | init-deploy/4-read-from-replicas | ++ get_mysql_headless_fqdn init-deploy 2 logger.go:42: 12:57:09 | init-deploy/4-read-from-replicas | ++ local cluster=init-deploy logger.go:42: 12:57:09 | init-deploy/4-read-from-replicas | ++ local index=2 logger.go:42: 12:57:09 | init-deploy/4-read-from-replicas | ++ echo init-deploy-mysql-2.init-deploy-mysql logger.go:42: 12:57:09 | init-deploy/4-read-from-replicas | + host=init-deploy-mysql-2.init-deploy-mysql logger.go:42: 12:57:09 | init-deploy/4-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h init-deploy-mysql-2.init-deploy-mysql -uroot -proot_password' logger.go:42: 12:57:09 | init-deploy/4-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 12:57:09 | init-deploy/4-read-from-replicas | ++ local 'uri=-h init-deploy-mysql-2.init-deploy-mysql -uroot -proot_password' logger.go:42: 12:57:09 | init-deploy/4-read-from-replicas | ++ local pod= logger.go:42: 12:57:09 | init-deploy/4-read-from-replicas | +++ get_client_pod logger.go:42: 12:57:09 | init-deploy/4-read-from-replicas | +++ kubectl -n kuttl-test-still-hare get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 12:57:10 | init-deploy/4-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 12:57:10 | init-deploy/4-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 12:57:10 | init-deploy/4-read-from-replicas | ++ local pod=mysql-client logger.go:42: 12:57:10 | init-deploy/4-read-from-replicas | ++ set +o xtrace logger.go:42: 12:57:10 | init-deploy/4-read-from-replicas | mysql-clienttrue logger.go:42: 12:57:10 | init-deploy/4-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 12:57:10 | init-deploy/4-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 12:57:10 | init-deploy/4-read-from-replicas | ++ kubectl -n kuttl-test-still-hare exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h init-deploy-mysql-2.init-deploy-mysql -uroot -proot_password' logger.go:42: 12:57:11 | init-deploy/4-read-from-replicas | + data=100500 logger.go:42: 12:57:11 | init-deploy/4-read-from-replicas | + args=' --from-literal=init-deploy-mysql-0.init-deploy-mysql=100500 --from-literal=init-deploy-mysql-1.init-deploy-mysql=100500 --from-literal=init-deploy-mysql-2.init-deploy-mysql=100500' logger.go:42: 12:57:11 | init-deploy/4-read-from-replicas | + kubectl create configmap -n kuttl-test-still-hare 04-read-from-replicas --from-literal=init-deploy-mysql-0.init-deploy-mysql=100500 --from-literal=init-deploy-mysql-1.init-deploy-mysql=100500 --from-literal=init-deploy-mysql-2.init-deploy-mysql=100500 logger.go:42: 12:57:11 | init-deploy/4-read-from-replicas | configmap/04-read-from-replicas created logger.go:42: 12:57:12 | init-deploy/4-read-from-replicas | test step completed 4-read-from-replicas logger.go:42: 12:57:12 | init-deploy/5-check-orchestrator | starting test step 5-check-orchestrator logger.go:42: 12:57:12 | init-deploy/5-check-orchestrator | running command: [sh -c set -o errexit set -o xtrace source ../../functions orc_host=$(get_orc_headless_fqdn $(get_cluster_name) 0) cluster=$(run_curl "http://${orc_host}:3000/api/clusters/" | jq -r .[0] | sed "s/.${NAMESPACE}//g") args="--from-literal=cluster=${cluster}" run_curl "http://${orc_host}:3000/api/cluster/${cluster}/" | jq -r .[].Key.Hostname | sed "s/.${NAMESPACE}//g" >"${TEMP_DIR}/instances" args="${args} --from-file=instances=${TEMP_DIR}/instances" kubectl create configmap -n "${NAMESPACE}" 05-check-orchestrator ${args}] logger.go:42: 12:57:12 | init-deploy/5-check-orchestrator | + source ../../functions logger.go:42: 12:57:12 | init-deploy/5-check-orchestrator | +++ realpath ../../.. 
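The 4-read-from-replicas loop above relies on StatefulSet DNS: get_mysql_headless_fqdn composes <cluster>-mysql-<index>.<cluster>-mysql, each replica's per-pod address behind the headless service, so the same SELECT can be pointed at every member individually. A condensed sketch of that loop, built only from commands visible in the trace:

    # read the replicated row from every mysql pod via its headless-service name
    NAMESPACE=kuttl-test-still-hare
    cluster=init-deploy
    size=$(kubectl -n "$NAMESPACE" get ps "$cluster" -o jsonpath='{.spec.mysql.size}')
    args=""
    for i in $(seq 0 $((size - 1))); do
        host="${cluster}-mysql-${i}.${cluster}-mysql"
        data=$(kubectl -n "$NAMESPACE" exec mysql-client -- bash -c \
            "printf '%s\n' 'SELECT * FROM myDB.myTable' | mysql -sN -h ${host} -uroot -proot_password" \
            2>/dev/null)
        args="${args} --from-literal=${host}=${data}"
    done
    # one ConfigMap key per replica, each expected to hold the same value
    kubectl create configmap -n "$NAMESPACE" 04-read-from-replicas ${args}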
logger.go:42: 12:57:12 | init-deploy/5-check-orchestrator | (+ source ../../functions and vars.sh environment dump omitted, identical to the 0-deploy-operator step)
logger.go:42: 12:57:12 | init-deploy/5-check-orchestrator | +++ get_cluster_name
logger.go:42: 12:57:12 | init-deploy/5-check-orchestrator | +++ kubectl -n kuttl-test-still-hare get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 12:57:13 | init-deploy/5-check-orchestrator | ++ get_orc_headless_fqdn init-deploy 0
logger.go:42: 12:57:13 | init-deploy/5-check-orchestrator | ++ local cluster=init-deploy
logger.go:42: 12:57:13 | init-deploy/5-check-orchestrator | ++ local index=0
logger.go:42: 12:57:13 | init-deploy/5-check-orchestrator | ++ echo init-deploy-orc-0.init-deploy-orc
logger.go:42: 12:57:13 | init-deploy/5-check-orchestrator | + orc_host=init-deploy-orc-0.init-deploy-orc
logger.go:42: 12:57:13 | init-deploy/5-check-orchestrator | ++ run_curl http://init-deploy-orc-0.init-deploy-orc:3000/api/clusters/
logger.go:42: 12:57:13 | init-deploy/5-check-orchestrator | ++ kubectl -n
kuttl-test-still-hare exec mysql-client -- bash -c 'curl -s -k http://init-deploy-orc-0.init-deploy-orc:3000/api/clusters/' logger.go:42: 12:57:13 | init-deploy/5-check-orchestrator | ++ jq -r '.[0]' logger.go:42: 12:57:13 | init-deploy/5-check-orchestrator | ++ sed s/.kuttl-test-still-hare//g logger.go:42: 12:57:13 | init-deploy/5-check-orchestrator | + cluster=init-deploy-mysql-0.init-deploy-mysql:3306 logger.go:42: 12:57:13 | init-deploy/5-check-orchestrator | + args=--from-literal=cluster=init-deploy-mysql-0.init-deploy-mysql:3306 logger.go:42: 12:57:13 | init-deploy/5-check-orchestrator | + run_curl http://init-deploy-orc-0.init-deploy-orc:3000/api/cluster/init-deploy-mysql-0.init-deploy-mysql:3306/ logger.go:42: 12:57:13 | init-deploy/5-check-orchestrator | + kubectl -n kuttl-test-still-hare exec mysql-client -- bash -c 'curl -s -k http://init-deploy-orc-0.init-deploy-orc:3000/api/cluster/init-deploy-mysql-0.init-deploy-mysql:3306/' logger.go:42: 12:57:13 | init-deploy/5-check-orchestrator | + jq -r '.[].Key.Hostname' logger.go:42: 12:57:13 | init-deploy/5-check-orchestrator | + sed s/.kuttl-test-still-hare//g logger.go:42: 12:57:14 | init-deploy/5-check-orchestrator | + args='--from-literal=cluster=init-deploy-mysql-0.init-deploy-mysql:3306 --from-file=instances=/tmp/kuttl/ps/init-deploy/instances' logger.go:42: 12:57:14 | init-deploy/5-check-orchestrator | + kubectl create configmap -n kuttl-test-still-hare 05-check-orchestrator --from-literal=cluster=init-deploy-mysql-0.init-deploy-mysql:3306 --from-file=instances=/tmp/kuttl/ps/init-deploy/instances logger.go:42: 12:57:15 | init-deploy/5-check-orchestrator | configmap/05-check-orchestrator created logger.go:42: 12:57:15 | init-deploy/5-check-orchestrator | test step completed 5-check-orchestrator logger.go:42: 12:57:15 | init-deploy/6-check-async-repl-not-ready-cr-status | starting test step 6-check-async-repl-not-ready-cr-status logger.go:42: 12:57:15 | init-deploy/6-check-async-repl-not-ready-cr-status | running command: [sh -c set -o errexit set -o xtrace source ../../functions state=$(kubectl -n ${NAMESPACE} get ps $(get_cluster_name) -o jsonpath='{.status.state}') if [[ $state != "ready" ]]; then echo "Status state should be ready, but is $state." exit 1 fi run_mysqlsh "STOP REPLICA;" "-h localhost -P 33060 -uroot -proot_password" "init-deploy-mysql-2" sleep 20 state=$(kubectl -n ${NAMESPACE} get ps $(get_cluster_name) -o jsonpath='{.status.state}') if [[ $state != "initializing" ]]; then echo "Status state should be initializing, but is $state." exit 1 fi run_mysqlsh "START REPLICA;" "-h localhost -P 33060 -uroot -proot_password" "init-deploy-mysql-2" sleep 20 state=$(kubectl -n ${NAMESPACE} get ps $(get_cluster_name) -o jsonpath='{.status.state}') if [[ $state != "ready" ]]; then echo "Status state should be ready, but is $state." exit 1 fi] logger.go:42: 12:57:15 | init-deploy/6-check-async-repl-not-ready-cr-status | + source ../../functions logger.go:42: 12:57:15 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ realpath ../../.. 
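The 5-check-orchestrator step above talks to orchestrator's REST API on port 3000 (run_curl simply execs curl inside the mysql-client pod): /api/clusters/ names the cluster orchestrator manages, and /api/cluster/<name>/ returns one record per instance, whose .Key.Hostname values should cover all three mysql pods. The same two calls as a standalone sketch:

    # ask orchestrator which cluster it manages and which instances it sees
    NAMESPACE=kuttl-test-still-hare
    orc_host=init-deploy-orc-0.init-deploy-orc
    cluster=$(kubectl -n "$NAMESPACE" exec mysql-client -- \
        curl -s -k "http://${orc_host}:3000/api/clusters/" \
        | jq -r '.[0]' | sed "s/.${NAMESPACE}//g")
    # one hostname per line, expected: init-deploy-mysql-0 through -2
    kubectl -n "$NAMESPACE" exec mysql-client -- \
        curl -s -k "http://${orc_host}:3000/api/cluster/${cluster}/" \
        | jq -r '.[].Key.Hostname' | sed "s/.${NAMESPACE}//g"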
logger.go:42: 12:57:15 | init-deploy/6-check-async-repl-not-ready-cr-status | (+ source ../../functions and vars.sh environment dump omitted, identical to the 0-deploy-operator step) logger.go:42: 12:57:15 |
init-deploy/6-check-async-repl-not-ready-cr-status | +++ grep '^minikube' logger.go:42: 12:57:16 | init-deploy/6-check-async-repl-not-ready-cr-status | ++ oc get projects logger.go:42: 12:57:16 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ get_cluster_name logger.go:42: 12:57:16 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ kubectl -n kuttl-test-still-hare get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 12:57:16 | init-deploy/6-check-async-repl-not-ready-cr-status | ++ kubectl -n kuttl-test-still-hare get ps init-deploy -o 'jsonpath={.status.state}' logger.go:42: 12:57:16 | init-deploy/6-check-async-repl-not-ready-cr-status | + state=ready logger.go:42: 12:57:16 | init-deploy/6-check-async-repl-not-ready-cr-status | + [[ ready != \r\e\a\d\y ]] logger.go:42: 12:57:16 | init-deploy/6-check-async-repl-not-ready-cr-status | + run_mysqlsh 'STOP REPLICA;' '-h localhost -P 33060 -uroot -proot_password' init-deploy-mysql-2 logger.go:42: 12:57:16 | init-deploy/6-check-async-repl-not-ready-cr-status | + local 'command=STOP REPLICA;' logger.go:42: 12:57:16 | init-deploy/6-check-async-repl-not-ready-cr-status | + local 'uri=-h localhost -P 33060 -uroot -proot_password' logger.go:42: 12:57:16 | init-deploy/6-check-async-repl-not-ready-cr-status | + local pod=init-deploy-mysql-2 logger.go:42: 12:57:16 | init-deploy/6-check-async-repl-not-ready-cr-status | ++ get_client_pod logger.go:42: 12:57:16 | init-deploy/6-check-async-repl-not-ready-cr-status | ++ kubectl -n kuttl-test-still-hare get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 12:57:17 | init-deploy/6-check-async-repl-not-ready-cr-status | + client_pod=mysql-client logger.go:42: 12:57:17 | init-deploy/6-check-async-repl-not-ready-cr-status | + wait_pod mysql-client logger.go:42: 12:57:17 | init-deploy/6-check-async-repl-not-ready-cr-status | + local pod=mysql-client logger.go:42: 12:57:17 | init-deploy/6-check-async-repl-not-ready-cr-status | + set +o xtrace logger.go:42: 12:57:17 | init-deploy/6-check-async-repl-not-ready-cr-status | mysql-clienttrue logger.go:42: 12:57:17 | init-deploy/6-check-async-repl-not-ready-cr-status | + kubectl -n kuttl-test-still-hare exec init-deploy-mysql-2 -- bash -c 'printf '\''%s\n'\'' "STOP REPLICA;" | mysqlsh --sql --quiet-start=2 -h localhost -P 33060 -uroot -proot_password' logger.go:42: 12:57:17 | init-deploy/6-check-async-repl-not-ready-cr-status | + tail -n +2 logger.go:42: 12:57:19 | init-deploy/6-check-async-repl-not-ready-cr-status | Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory logger.go:42: 12:57:19 | init-deploy/6-check-async-repl-not-ready-cr-status | + sleep 20 logger.go:42: 12:57:39 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ get_cluster_name logger.go:42: 12:57:39 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ kubectl -n kuttl-test-still-hare get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 12:57:39 | init-deploy/6-check-async-repl-not-ready-cr-status | ++ kubectl -n kuttl-test-still-hare get ps init-deploy -o 'jsonpath={.status.state}' logger.go:42: 12:57:40 | init-deploy/6-check-async-repl-not-ready-cr-status | + state=initializing logger.go:42: 12:57:40 | init-deploy/6-check-async-repl-not-ready-cr-status | + [[ initializing != \i\n\i\t\i\a\l\i\z\i\n\g ]] logger.go:42: 12:57:40 | init-deploy/6-check-async-repl-not-ready-cr-status | + run_mysqlsh 'START REPLICA;' '-h localhost -P 33060 -uroot -proot_password' init-deploy-mysql-2 logger.go:42: 12:57:40 | 
init-deploy/6-check-async-repl-not-ready-cr-status | + local 'command=START REPLICA;' logger.go:42: 12:57:40 | init-deploy/6-check-async-repl-not-ready-cr-status | + local 'uri=-h localhost -P 33060 -uroot -proot_password' logger.go:42: 12:57:40 | init-deploy/6-check-async-repl-not-ready-cr-status | + local pod=init-deploy-mysql-2 logger.go:42: 12:57:40 | init-deploy/6-check-async-repl-not-ready-cr-status | ++ get_client_pod logger.go:42: 12:57:40 | init-deploy/6-check-async-repl-not-ready-cr-status | ++ kubectl -n kuttl-test-still-hare get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 12:57:40 | init-deploy/6-check-async-repl-not-ready-cr-status | + client_pod=mysql-client logger.go:42: 12:57:40 | init-deploy/6-check-async-repl-not-ready-cr-status | + wait_pod mysql-client logger.go:42: 12:57:40 | init-deploy/6-check-async-repl-not-ready-cr-status | + local pod=mysql-client logger.go:42: 12:57:40 | init-deploy/6-check-async-repl-not-ready-cr-status | + set +o xtrace logger.go:42: 12:57:41 | init-deploy/6-check-async-repl-not-ready-cr-status | mysql-clienttrue logger.go:42: 12:57:41 | init-deploy/6-check-async-repl-not-ready-cr-status | + tail -n +2 logger.go:42: 12:57:41 | init-deploy/6-check-async-repl-not-ready-cr-status | + kubectl -n kuttl-test-still-hare exec init-deploy-mysql-2 -- bash -c 'printf '\''%s\n'\'' "START REPLICA;" | mysqlsh --sql --quiet-start=2 -h localhost -P 33060 -uroot -proot_password' logger.go:42: 12:57:42 | init-deploy/6-check-async-repl-not-ready-cr-status | Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory logger.go:42: 12:57:42 | init-deploy/6-check-async-repl-not-ready-cr-status | + sleep 20 logger.go:42: 12:58:02 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ get_cluster_name logger.go:42: 12:58:02 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ kubectl -n kuttl-test-still-hare get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 12:58:03 | init-deploy/6-check-async-repl-not-ready-cr-status | ++ kubectl -n kuttl-test-still-hare get ps init-deploy -o 'jsonpath={.status.state}' logger.go:42: 12:58:03 | init-deploy/6-check-async-repl-not-ready-cr-status | + state=ready logger.go:42: 12:58:03 | init-deploy/6-check-async-repl-not-ready-cr-status | + [[ ready != \r\e\a\d\y ]] logger.go:42: 12:58:03 | init-deploy/6-check-async-repl-not-ready-cr-status | test step completed 6-check-async-repl-not-ready-cr-status logger.go:42: 12:58:03 | init-deploy/7-check-password-leak | starting test step 7-check-password-leak logger.go:42: 12:58:03 | init-deploy/7-check-password-leak | running command: [sh -c set -o errexit set -o xtrace source ../../functions check_passwords_leak] logger.go:42: 12:58:03 | init-deploy/7-check-password-leak | + source ../../functions logger.go:42: 12:58:03 | init-deploy/7-check-password-leak | +++ realpath ../../.. 
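The 6-check-async-repl-not-ready-cr-status step above is a three-state check: with replication healthy the CR reports ready, STOP REPLICA on one replica must flip .status.state to initializing within a reconcile cycle (hence the 20-second sleeps), and START REPLICA must bring it back to ready. The core of the probe, taken directly from the trace (mysqlsh runs inside the mysql pod itself, against the X protocol port 33060):

    # break async replication on one replica, then watch the CR state flip
    NAMESPACE=kuttl-test-still-hare
    kubectl -n "$NAMESPACE" exec init-deploy-mysql-2 -- bash -c \
        'printf "%s\n" "STOP REPLICA;" | mysqlsh --sql --quiet-start=2 -h localhost -P 33060 -uroot -proot_password'
    sleep 20    # give the operator a reconcile cycle to notice the broken replica
    kubectl -n "$NAMESPACE" get ps init-deploy -o jsonpath='{.status.state}'    # expect: initializing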
logger.go:42: 12:58:03 | init-deploy/7-check-password-leak | (+ source ../../functions and vars.sh environment dump omitted, identical to the 0-deploy-operator step)
logger.go:42: 12:58:04 | init-deploy/7-check-password-leak | + check_passwords_leak
logger.go:42: 12:58:04 | init-deploy/7-check-password-leak | + local secrets
logger.go:42: 12:58:04 | init-deploy/7-check-password-leak | + local passwords
logger.go:42: 12:58:04 | init-deploy/7-check-password-leak | + local pods
logger.go:42: 12:58:04 | init-deploy/7-check-password-leak | ++ kubectl get secrets -o json
logger.go:42: 12:58:04 | init-deploy/7-check-password-leak | ++ jq -r '.items[].data | to_entries | .[] | select(.key | (endswith(".crt") or endswith(".key") or endswith(".pub") or endswith(".pem") or endswith(".p12") or test("namespace")) | not) | .value'
logger.go:42: 12:58:04 | init-deploy/7-check-password-leak | + secrets=
logger.go:42: 12:58:04 | init-deploy/7-check-password-leak | + passwords=' '
logger.go:42: 12:58:04 |
init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-still-hare get pods -o name logger.go:42: 12:58:04 | init-deploy/7-check-password-leak | ++ awk -F / '{print $2}' logger.go:42: 12:58:05 | init-deploy/7-check-password-leak | + pods='init-deploy-haproxy-0 logger.go:42: 12:58:05 | init-deploy/7-check-password-leak | init-deploy-haproxy-1 logger.go:42: 12:58:05 | init-deploy/7-check-password-leak | init-deploy-haproxy-2 logger.go:42: 12:58:05 | init-deploy/7-check-password-leak | init-deploy-mysql-0 logger.go:42: 12:58:05 | init-deploy/7-check-password-leak | init-deploy-mysql-1 logger.go:42: 12:58:05 | init-deploy/7-check-password-leak | init-deploy-mysql-2 logger.go:42: 12:58:05 | init-deploy/7-check-password-leak | init-deploy-orc-0 logger.go:42: 12:58:05 | init-deploy/7-check-password-leak | init-deploy-orc-1 logger.go:42: 12:58:05 | init-deploy/7-check-password-leak | init-deploy-orc-2 logger.go:42: 12:58:05 | init-deploy/7-check-password-leak | mysql-client' logger.go:42: 12:58:05 | init-deploy/7-check-password-leak | + collect_logs kuttl-test-still-hare logger.go:42: 12:58:05 | init-deploy/7-check-password-leak | + local containers logger.go:42: 12:58:05 | init-deploy/7-check-password-leak | + local count logger.go:42: 12:58:05 | init-deploy/7-check-password-leak | + NS=kuttl-test-still-hare logger.go:42: 12:58:05 | init-deploy/7-check-password-leak | + for p in '$pods' logger.go:42: 12:58:05 | init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-still-hare get pod init-deploy-haproxy-0 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 12:58:05 | init-deploy/7-check-password-leak | + containers='haproxy mysql-monit' logger.go:42: 12:58:05 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 12:58:05 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs init-deploy-haproxy-0 -c haproxy logger.go:42: 12:58:06 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-0-haproxy.txt logger.go:42: 12:58:06 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-0-haproxy.txt logger.go:42: 12:58:06 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 12:58:06 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs init-deploy-haproxy-0 -c mysql-monit logger.go:42: 12:58:07 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-0-mysql-monit.txt logger.go:42: 12:58:07 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-0-mysql-monit.txt logger.go:42: 12:58:07 | init-deploy/7-check-password-leak | + echo logger.go:42: 12:58:07 | init-deploy/7-check-password-leak | logger.go:42: 12:58:07 | init-deploy/7-check-password-leak | + for p in '$pods' logger.go:42: 12:58:07 | init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-still-hare get pod init-deploy-haproxy-1 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 12:58:07 | init-deploy/7-check-password-leak | + containers='haproxy mysql-monit' logger.go:42: 12:58:07 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 12:58:07 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs init-deploy-haproxy-1 -c haproxy logger.go:42: 12:58:09 | init-deploy/7-check-password-leak | + echo logs saved in: 
/tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-1-haproxy.txt logger.go:42: 12:58:09 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-1-haproxy.txt logger.go:42: 12:58:09 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 12:58:09 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs init-deploy-haproxy-1 -c mysql-monit logger.go:42: 12:58:09 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-1-mysql-monit.txt logger.go:42: 12:58:09 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-1-mysql-monit.txt logger.go:42: 12:58:09 | init-deploy/7-check-password-leak | + echo logger.go:42: 12:58:09 | init-deploy/7-check-password-leak | logger.go:42: 12:58:09 | init-deploy/7-check-password-leak | + for p in '$pods' logger.go:42: 12:58:09 | init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-still-hare get pod init-deploy-haproxy-2 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 12:58:10 | init-deploy/7-check-password-leak | + containers='haproxy mysql-monit' logger.go:42: 12:58:10 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 12:58:10 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs init-deploy-haproxy-2 -c haproxy logger.go:42: 12:58:11 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-2-haproxy.txt logger.go:42: 12:58:11 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-2-haproxy.txt logger.go:42: 12:58:11 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 12:58:11 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs init-deploy-haproxy-2 -c mysql-monit logger.go:42: 12:58:11 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-2-mysql-monit.txt logger.go:42: 12:58:11 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-2-mysql-monit.txt logger.go:42: 12:58:11 | init-deploy/7-check-password-leak | + echo logger.go:42: 12:58:11 | init-deploy/7-check-password-leak | logger.go:42: 12:58:11 | init-deploy/7-check-password-leak | + for p in '$pods' logger.go:42: 12:58:11 | init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-still-hare get pod init-deploy-mysql-0 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 12:58:12 | init-deploy/7-check-password-leak | + containers='mysql xtrabackup pt-heartbeat' logger.go:42: 12:58:12 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 12:58:12 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs init-deploy-mysql-0 -c mysql logger.go:42: 12:58:13 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-0-mysql.txt logger.go:42: 12:58:13 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-0-mysql.txt logger.go:42: 12:58:13 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 12:58:13 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs init-deploy-mysql-0 -c xtrabackup logger.go:42: 12:58:13 | 
init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-0-xtrabackup.txt
logger.go:42: 12:58:13 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-0-xtrabackup.txt
logger.go:42: 12:58:13 | init-deploy/7-check-password-leak | + for c in '$containers'
logger.go:42: 12:58:13 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs init-deploy-mysql-0 -c pt-heartbeat
logger.go:42: 12:58:14 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-0-pt-heartbeat.txt
logger.go:42: 12:58:14 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-0-pt-heartbeat.txt
logger.go:42: 12:58:14 | init-deploy/7-check-password-leak | + echo
logger.go:42: 12:58:14 | init-deploy/7-check-password-leak |
logger.go:42: 12:58:14 | init-deploy/7-check-password-leak | + for p in '$pods'
logger.go:42: 12:58:14 | init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-still-hare get pod init-deploy-mysql-1 -o 'jsonpath={.spec.containers[*].name}'
logger.go:42: 12:58:14 | init-deploy/7-check-password-leak | + containers='mysql xtrabackup pt-heartbeat'
logger.go:42: 12:58:14 | init-deploy/7-check-password-leak | + for c in '$containers'
logger.go:42: 12:58:14 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs init-deploy-mysql-1 -c mysql
logger.go:42: 12:58:15 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-1-mysql.txt
logger.go:42: 12:58:15 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-1-mysql.txt
logger.go:42: 12:58:15 | init-deploy/7-check-password-leak | + for c in '$containers'
logger.go:42: 12:58:15 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs init-deploy-mysql-1 -c xtrabackup
logger.go:42: 12:58:16 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-1-xtrabackup.txt
logger.go:42: 12:58:16 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-1-xtrabackup.txt
logger.go:42: 12:58:16 | init-deploy/7-check-password-leak | + for c in '$containers'
logger.go:42: 12:58:16 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs init-deploy-mysql-1 -c pt-heartbeat
logger.go:42: 12:58:16 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-1-pt-heartbeat.txt
logger.go:42: 12:58:16 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-1-pt-heartbeat.txt
logger.go:42: 12:58:16 | init-deploy/7-check-password-leak | + echo
logger.go:42: 12:58:16 | init-deploy/7-check-password-leak |
logger.go:42: 12:58:16 | init-deploy/7-check-password-leak | + for p in '$pods'
logger.go:42: 12:58:16 | init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-still-hare get pod init-deploy-mysql-2 -o 'jsonpath={.spec.containers[*].name}'
logger.go:42: 12:58:17 | init-deploy/7-check-password-leak | + containers='mysql xtrabackup pt-heartbeat'
logger.go:42: 12:58:17 | init-deploy/7-check-password-leak | + for c in '$containers'
logger.go:42: 12:58:17 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs init-deploy-mysql-2 -c mysql
logger.go:42: 12:58:17 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-2-mysql.txt
logger.go:42: 12:58:17 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-2-mysql.txt
logger.go:42: 12:58:17 | init-deploy/7-check-password-leak | + for c in '$containers'
logger.go:42: 12:58:17 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs init-deploy-mysql-2 -c xtrabackup
logger.go:42: 12:58:18 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-2-xtrabackup.txt
logger.go:42: 12:58:18 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-2-xtrabackup.txt
logger.go:42: 12:58:18 | init-deploy/7-check-password-leak | + for c in '$containers'
logger.go:42: 12:58:18 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs init-deploy-mysql-2 -c pt-heartbeat
logger.go:42: 12:58:19 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-2-pt-heartbeat.txt
logger.go:42: 12:58:19 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-2-pt-heartbeat.txt
logger.go:42: 12:58:19 | init-deploy/7-check-password-leak | + echo
logger.go:42: 12:58:19 | init-deploy/7-check-password-leak |
logger.go:42: 12:58:19 | init-deploy/7-check-password-leak | + for p in '$pods'
logger.go:42: 12:58:19 | init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-still-hare get pod init-deploy-orc-0 -o 'jsonpath={.spec.containers[*].name}'
logger.go:42: 12:58:19 | init-deploy/7-check-password-leak | + containers='orchestrator mysql-monit'
logger.go:42: 12:58:19 | init-deploy/7-check-password-leak | + for c in '$containers'
logger.go:42: 12:58:19 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs init-deploy-orc-0 -c orchestrator
logger.go:42: 12:58:20 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-0-orchestrator.txt
logger.go:42: 12:58:20 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-0-orchestrator.txt
logger.go:42: 12:58:20 | init-deploy/7-check-password-leak | + for c in '$containers'
logger.go:42: 12:58:20 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs init-deploy-orc-0 -c mysql-monit
logger.go:42: 12:58:21 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-0-mysql-monit.txt
logger.go:42: 12:58:21 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-0-mysql-monit.txt
logger.go:42: 12:58:21 | init-deploy/7-check-password-leak | + echo
logger.go:42: 12:58:21 | init-deploy/7-check-password-leak |
logger.go:42: 12:58:21 | init-deploy/7-check-password-leak | + for p in '$pods'
logger.go:42: 12:58:21 | init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-still-hare get pod init-deploy-orc-1 -o 'jsonpath={.spec.containers[*].name}'
logger.go:42: 12:58:21 | init-deploy/7-check-password-leak | + containers='orchestrator mysql-monit'
logger.go:42: 12:58:21 | init-deploy/7-check-password-leak | + for c in '$containers'
logger.go:42: 12:58:21 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs init-deploy-orc-1 -c orchestrator
logger.go:42: 12:58:22 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-1-orchestrator.txt
logger.go:42: 12:58:22 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-1-orchestrator.txt
logger.go:42: 12:58:22 | init-deploy/7-check-password-leak | + for c in '$containers'
logger.go:42: 12:58:22 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs init-deploy-orc-1 -c mysql-monit
logger.go:42: 12:58:22 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-1-mysql-monit.txt
logger.go:42: 12:58:22 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-1-mysql-monit.txt
logger.go:42: 12:58:22 | init-deploy/7-check-password-leak | + echo
logger.go:42: 12:58:22 | init-deploy/7-check-password-leak |
logger.go:42: 12:58:22 | init-deploy/7-check-password-leak | + for p in '$pods'
logger.go:42: 12:58:22 | init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-still-hare get pod init-deploy-orc-2 -o 'jsonpath={.spec.containers[*].name}'
logger.go:42: 12:58:23 | init-deploy/7-check-password-leak | + containers='orchestrator mysql-monit'
logger.go:42: 12:58:23 | init-deploy/7-check-password-leak | + for c in '$containers'
logger.go:42: 12:58:23 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs init-deploy-orc-2 -c orchestrator
logger.go:42: 12:58:24 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-2-orchestrator.txt
logger.go:42: 12:58:24 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-2-orchestrator.txt
logger.go:42: 12:58:24 | init-deploy/7-check-password-leak | + for c in '$containers'
logger.go:42: 12:58:24 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs init-deploy-orc-2 -c mysql-monit
logger.go:42: 12:58:24 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-2-mysql-monit.txt
logger.go:42: 12:58:24 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-2-mysql-monit.txt
logger.go:42: 12:58:24 | init-deploy/7-check-password-leak | + echo
logger.go:42: 12:58:24 | init-deploy/7-check-password-leak |
logger.go:42: 12:58:24 | init-deploy/7-check-password-leak | + for p in '$pods'
logger.go:42: 12:58:24 | init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-still-hare get pod mysql-client -o 'jsonpath={.spec.containers[*].name}'
logger.go:42: 12:58:25 | init-deploy/7-check-password-leak | + containers=mysql-client
logger.go:42: 12:58:25 | init-deploy/7-check-password-leak | + for c in '$containers'
logger.go:42: 12:58:25 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-still-hare logs mysql-client -c mysql-client
logger.go:42: 12:58:25 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-mysql-client-mysql-client.txt
logger.go:42: 12:58:25 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-mysql-client-mysql-client.txt
logger.go:42: 12:58:25 | init-deploy/7-check-password-leak | + echo
logger.go:42: 12:58:25 | init-deploy/7-check-password-leak |
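The xtrace above follows one generic collection pattern: list the pods in the test namespace, read each pod's container names via jsonpath, and dump every container's log into TEMP_DIR. A minimal sketch of that loop (an illustration, not the repo's exact collect_logs helper; the final grep for leaked passwords is inferred from the step name):

    NS=kuttl-test-still-hare
    TEMP_DIR=/tmp/kuttl/ps/init-deploy
    pods=$(kubectl -n "$NS" get pods -o name | awk -F / '{print $2}')
    for p in $pods; do
        containers=$(kubectl -n "$NS" get pod "$p" -o 'jsonpath={.spec.containers[*].name}')
        for c in $containers; do
            # one file per pod/container pair, mirroring the paths echoed above
            kubectl -n "$NS" logs "$p" -c "$c" >"$TEMP_DIR/logs_output-$p-$c.txt"
            echo "logs saved in: $TEMP_DIR/logs_output-$p-$c.txt"
        done
    done
    # hypothetical final check: fail the step if any saved log leaks a password
    # grep -F "$SOME_PASSWORD" "$TEMP_DIR"/logs_output-*.txt && exit 1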
ps-operator ']' logger.go:42: 12:58:25 | init-deploy/7-check-password-leak | ++ kubectl -n ps-operator get pods -o name logger.go:42: 12:58:25 | init-deploy/7-check-password-leak | ++ awk -F / '{print $2}' logger.go:42: 12:58:26 | init-deploy/7-check-password-leak | + pods=percona-server-mysql-operator-6dbcb4d9bb-m4d7p logger.go:42: 12:58:26 | init-deploy/7-check-password-leak | + collect_logs ps-operator logger.go:42: 12:58:26 | init-deploy/7-check-password-leak | + local containers logger.go:42: 12:58:26 | init-deploy/7-check-password-leak | + local count logger.go:42: 12:58:26 | init-deploy/7-check-password-leak | + NS=ps-operator logger.go:42: 12:58:26 | init-deploy/7-check-password-leak | + for p in '$pods' logger.go:42: 12:58:26 | init-deploy/7-check-password-leak | ++ kubectl -n ps-operator get pod percona-server-mysql-operator-6dbcb4d9bb-m4d7p -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 12:58:26 | init-deploy/7-check-password-leak | + containers=manager logger.go:42: 12:58:26 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 12:58:26 | init-deploy/7-check-password-leak | + kubectl -n ps-operator logs percona-server-mysql-operator-6dbcb4d9bb-m4d7p -c manager logger.go:42: 12:58:27 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-percona-server-mysql-operator-6dbcb4d9bb-m4d7p-manager.txt logger.go:42: 12:58:27 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-percona-server-mysql-operator-6dbcb4d9bb-m4d7p-manager.txt logger.go:42: 12:58:27 | init-deploy/7-check-password-leak | + echo logger.go:42: 12:58:27 | init-deploy/7-check-password-leak | logger.go:42: 12:58:27 | init-deploy/7-check-password-leak | test step completed 7-check-password-leak logger.go:42: 12:58:27 | init-deploy/98-drop-finalizer | starting test step 98-drop-finalizer logger.go:42: 12:58:27 | init-deploy/98-drop-finalizer | PerconaServerMySQL:kuttl-test-still-hare/init-deploy updated logger.go:42: 12:58:27 | init-deploy/98-drop-finalizer | test step completed 98-drop-finalizer logger.go:42: 12:58:27 | init-deploy/99-remove-cluster-gracefully | starting test step 99-remove-cluster-gracefully logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_operator] logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | + source ../../functions logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ realpath ../../.. 
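The 98-drop-finalizer step above reports the PerconaServerMySQL resource as updated; kuttl applies a step file that clears the CR's finalizers so the later teardown cannot hang on them. Done by hand, the equivalent is a merge patch of this shape (the command is an illustration, not taken from the test files):

    kubectl -n kuttl-test-still-hare patch perconaservermysql init-deploy \
        --type=merge -p '{"metadata":{"finalizers":[]}}'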
logger.go:42: 12:58:27 | init-deploy/99-remove-cluster-gracefully | starting test step 99-remove-cluster-gracefully
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_operator]
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | + source ../../functions
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ realpath ../../..
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-929
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | ++++ pwd
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-929/e2e-tests/tests/init-deploy
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | ++ test_name=init-deploy
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-929/e2e-tests/vars.sh
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-929
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-929
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-929/deploy
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-929/deploy
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-929/e2e-tests
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-929/e2e-tests
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-929/e2e-tests/conf
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-929/e2e-tests/conf
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ export TEMP_DIR=/tmp/kuttl/ps/init-deploy
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ TEMP_DIR=/tmp/kuttl/ps/init-deploy
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ export GIT_BRANCH=PR-929
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ GIT_BRANCH=PR-929
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ export VERSION=PR-929-2cac30ca
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ VERSION=PR-929-2cac30ca
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-929-2cac30ca
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-929-2cac30ca
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ export MINIO_VER=5.4.0
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ MINIO_VER=5.4.0
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | ++++ which gdate
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-929/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | ++++ which date
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ date=/usr/bin/date
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ oc get projects
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ :
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ kubectl get nodes
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | +++ grep '^minikube'
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | ++ oc get projects
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | + destroy_operator
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
logger.go:42: 12:58:28 | init-deploy/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
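Reconstructed from the xtrace above and below, destroy_operator amounts to roughly the following (the real helper lives in e2e-tests/functions; the variable name and error handling here are assumptions):

    OPERATOR_NS=ps-operator
    destroy_operator() {
        # remove the operator deployment without waiting for graceful termination
        kubectl -n "$OPERATOR_NS" delete deployment percona-server-mysql-operator --force --grace-period=0
        # the operator runs in its own namespace, so drop that too
        if [[ -n $OPERATOR_NS ]]; then
            kubectl delete namespace "$OPERATOR_NS" --force --grace-period=0
        fi
    }

--force --grace-period=0 deletes the API objects immediately, which is why kubectl prints the warning above about resources possibly continuing to run.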
logger.go:42: 12:58:29 | init-deploy/99-remove-cluster-gracefully | deployment.apps "percona-server-mysql-operator" force deleted
logger.go:42: 12:58:29 | init-deploy/99-remove-cluster-gracefully | + [[ -n ps-operator ]]
logger.go:42: 12:58:29 | init-deploy/99-remove-cluster-gracefully | + kubectl delete namespace ps-operator --force --grace-period=0
logger.go:42: 12:58:29 | init-deploy/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 12:58:29 | init-deploy/99-remove-cluster-gracefully | namespace "ps-operator" force deleted
logger.go:42: 12:58:36 | init-deploy/99-remove-cluster-gracefully | test step completed 99-remove-cluster-gracefully
logger.go:42: 12:58:36 | init-deploy | init-deploy events from ns kuttl-test-still-hare:
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:16 +0000 UTC Normal Pod mysql-client Binding Scheduled Successfully assigned kuttl-test-still-hare/mysql-client to gke-jen-ps-929-2cac30ca--default-pool-47c33f20-2xm2 default-scheduler
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:17 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Container image "percona/percona-server:8.0.33" already present on machine kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:17 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container: mysql-client kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:17 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:28 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:28 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
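The block continuing below is the namespace event dump kuttl prints when a test ends. Roughly the same view can be pulled from a live cluster with (the sort key is one reasonable choice, not necessarily what kuttl itself uses):

    kubectl -n kuttl-test-still-hare get events --sort-by=.metadata.creationTimestamp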
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:28 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-still-hare/datadir-init-deploy-mysql-0" pd.csi.storage.gke.io_gke-fd9abc9c3cf449329ef6-a8d5-3196-vm_3aaf5bb9-0814-47c5-b26c-a202903c3743
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:28 +0000 UTC Normal StatefulSet.apps init-deploy-mysql SuccessfulCreate create Claim datadir-init-deploy-mysql-0 Pod init-deploy-mysql-0 in StatefulSet init-deploy-mysql success statefulset-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:28 +0000 UTC Normal StatefulSet.apps init-deploy-mysql SuccessfulCreate create Pod init-deploy-mysql-0 in StatefulSet init-deploy-mysql successful statefulset-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:29 +0000 UTC Normal Pod init-deploy-orc-0 Binding Scheduled Successfully assigned kuttl-test-still-hare/init-deploy-orc-0 to gke-jen-ps-929-2cac30ca--default-pool-47c33f20-bcrb default-scheduler
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:29 +0000 UTC Normal StatefulSet.apps init-deploy-orc SuccessfulCreate create Pod init-deploy-orc-0 in StatefulSet init-deploy-orc successful statefulset-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:30 +0000 UTC Normal Pod init-deploy-orc-0.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:30 +0000 UTC Normal Pod init-deploy-orc-0.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" in 288ms (288ms including waiting). Image size: 115554316 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:30 +0000 UTC Normal Pod init-deploy-orc-0.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:30 +0000 UTC Normal Pod init-deploy-orc-0.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:32 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-abdab557-8bc7-4ea6-9e2e-28cebe4bda1a pd.csi.storage.gke.io_gke-fd9abc9c3cf449329ef6-a8d5-3196-vm_3aaf5bb9-0814-47c5-b26c-a202903c3743
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:32 +0000 UTC Normal Pod init-deploy-mysql-0 Binding Scheduled Successfully assigned kuttl-test-still-hare/init-deploy-mysql-0 to gke-jen-ps-929-2cac30ca--default-pool-47c33f20-2xm2 default-scheduler
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:32 +0000 UTC Normal Pod init-deploy-orc-0.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:32 +0000 UTC Normal Pod init-deploy-orc-0.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 162ms (162ms including waiting). Image size: 72481355 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:32 +0000 UTC Normal Pod init-deploy-orc-0.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:32 +0000 UTC Normal Pod init-deploy-orc-0.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:32 +0000 UTC Normal Pod init-deploy-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:33 +0000 UTC Normal Pod init-deploy-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 221ms (221ms including waiting). Image size: 72481355 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:33 +0000 UTC Normal Pod init-deploy-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:33 +0000 UTC Normal Pod init-deploy-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:40 +0000 UTC Normal Pod init-deploy-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-abdab557-8bc7-4ea6-9e2e-28cebe4bda1a" attachdetach-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:41 +0000 UTC Normal Pod init-deploy-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:41 +0000 UTC Normal Pod init-deploy-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" in 243ms (243ms including waiting). Image size: 115554316 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:41 +0000 UTC Normal Pod init-deploy-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:41 +0000 UTC Normal Pod init-deploy-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:43 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:44 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 233ms (233ms including waiting). Image size: 436552282 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:44 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:44 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:44 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:44 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 242ms (242ms including waiting). Image size: 437250902 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:44 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:44 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:44 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:44 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 195ms (195ms including waiting). Image size: 132962163 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:44 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:50:45 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:04 +0000 UTC Normal StatefulSet.apps init-deploy-orc SuccessfulCreate create Pod init-deploy-orc-1 in StatefulSet init-deploy-orc successful statefulset-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:05 +0000 UTC Normal Pod init-deploy-orc-1 Binding Scheduled Successfully assigned kuttl-test-still-hare/init-deploy-orc-1 to gke-jen-ps-929-2cac30ca--default-pool-47c33f20-0czp default-scheduler
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:06 +0000 UTC Warning Pod init-deploy-orc-1 FailedMount MountVolume.SetUp failed for volume "users" : failed to sync secret cache: timed out waiting for the condition kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:06 +0000 UTC Warning Pod init-deploy-orc-1 FailedMount MountVolume.SetUp failed for volume "tls" : failed to sync secret cache: timed out waiting for the condition kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:06 +0000 UTC Warning Pod init-deploy-orc-1 FailedMount MountVolume.SetUp failed for volume "custom" : failed to sync configmap cache: timed out waiting for the condition kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:06 +0000 UTC Warning Pod init-deploy-orc-1 FailedMount MountVolume.SetUp failed for volume "kube-api-access-bsqnh" : failed to sync configmap cache: timed out waiting for the condition kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:07 +0000 UTC Normal Pod init-deploy-orc-1.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" kubelet
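The FailedMount warnings on init-deploy-orc-1 above ("failed to sync secret cache: timed out waiting for the condition") are a common transient race: the kubelet tried to mount the pod's secret and configmap volumes before its watch caches were warm. The mounts succeed on retry, as the orchestrator-init pull immediately after shows. If in doubt, the pod's current state and event history can be checked directly:

    kubectl -n kuttl-test-still-hare describe pod init-deploy-orc-1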
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:07 +0000 UTC Normal Pod init-deploy-orc-1.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" in 283ms (283ms including waiting). Image size: 115554316 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:07 +0000 UTC Normal Pod init-deploy-orc-1.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:07 +0000 UTC Normal Pod init-deploy-orc-1.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:10 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:11 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 249ms (249ms including waiting). Image size: 72481355 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:11 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:11 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:11 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:11 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 224ms (224ms including waiting). Image size: 72481355 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:11 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:11 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:16 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:16 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:16 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-still-hare/datadir-init-deploy-mysql-1" pd.csi.storage.gke.io_gke-fd9abc9c3cf449329ef6-a8d5-3196-vm_3aaf5bb9-0814-47c5-b26c-a202903c3743
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:16 +0000 UTC Normal StatefulSet.apps init-deploy-mysql SuccessfulCreate create Claim datadir-init-deploy-mysql-1 Pod init-deploy-mysql-1 in StatefulSet init-deploy-mysql success statefulset-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:16 +0000 UTC Normal StatefulSet.apps init-deploy-mysql SuccessfulCreate create Pod init-deploy-mysql-1 in StatefulSet init-deploy-mysql successful statefulset-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:19 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-d6f81f99-1517-4b3d-af77-74ee116f1096 pd.csi.storage.gke.io_gke-fd9abc9c3cf449329ef6-a8d5-3196-vm_3aaf5bb9-0814-47c5-b26c-a202903c3743
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:20 +0000 UTC Normal Pod init-deploy-mysql-1 Binding Scheduled Successfully assigned kuttl-test-still-hare/init-deploy-mysql-1 to gke-jen-ps-929-2cac30ca--default-pool-47c33f20-0czp default-scheduler
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:21 +0000 UTC Normal Pod init-deploy-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-still-hare/init-deploy-haproxy-0 to gke-jen-ps-929-2cac30ca--default-pool-47c33f20-bcrb default-scheduler
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:21 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:21 +0000 UTC Normal StatefulSet.apps init-deploy-haproxy SuccessfulCreate create Pod init-deploy-haproxy-0 in StatefulSet init-deploy-haproxy successful statefulset-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:22 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" in 283ms (283ms including waiting). Image size: 115554316 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:22 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:22 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:22 +0000 UTC Warning Pod init-deploy-orc-1.spec.containers{orchestrator} Unhealthy Liveness probe failed: Get "http://10.202.192.24:3000/api/lb-check": dial tcp 10.202.192.24:3000: connect: connection refused kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:23 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:24 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 247ms (247ms including waiting). Image size: 102739877 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:24 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:24 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:24 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:24 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 259ms (259ms including waiting). Image size: 102739877 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:24 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:24 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:25 +0000 UTC Normal Pod init-deploy-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-still-hare/init-deploy-haproxy-1 to gke-jen-ps-929-2cac30ca--default-pool-47c33f20-2xm2 default-scheduler
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:25 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:25 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" in 288ms (288ms including waiting). Image size: 115554316 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:25 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:25 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:25 +0000 UTC Normal StatefulSet.apps init-deploy-haproxy SuccessfulCreate create Pod init-deploy-haproxy-1 in StatefulSet init-deploy-haproxy successful statefulset-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:27 +0000 UTC Normal Pod init-deploy-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-d6f81f99-1517-4b3d-af77-74ee116f1096" attachdetach-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:28 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:28 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 220ms (220ms including waiting). Image size: 102739877 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:28 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:28 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:28 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:28 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 213ms (213ms including waiting). Image size: 102739877 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:28 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:28 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:29 +0000 UTC Normal Pod init-deploy-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-still-hare/init-deploy-haproxy-2 to gke-jen-ps-929-2cac30ca--default-pool-47c33f20-0czp default-scheduler
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:29 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:29 +0000 UTC Normal StatefulSet.apps init-deploy-haproxy SuccessfulCreate create Pod init-deploy-haproxy-2 in StatefulSet init-deploy-haproxy successful statefulset-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:29 +0000 UTC Normal Pod init-deploy-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:29 +0000 UTC Normal Pod init-deploy-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" in 264ms (264ms including waiting). Image size: 115554316 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:29 +0000 UTC Normal Pod init-deploy-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:29 +0000 UTC Normal Pod init-deploy-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:30 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" in 292ms (292ms including waiting). Image size: 115554316 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:30 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:30 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:32 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:32 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:33 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 271ms (271ms including waiting). Image size: 102739877 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:33 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:33 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:33 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:33 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 376ms (376ms including waiting). Image size: 102739877 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:33 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:33 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:33 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 254ms (254ms including waiting). Image size: 436552282 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:33 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:33 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:33 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:33 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 244ms (244ms including waiting). Image size: 437250902 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:33 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:33 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:33 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:34 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 243ms (243ms including waiting). Image size: 132962163 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:34 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:34 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:42 +0000 UTC Normal Pod init-deploy-orc-2 Binding Scheduled Successfully assigned kuttl-test-still-hare/init-deploy-orc-2 to gke-jen-ps-929-2cac30ca--default-pool-47c33f20-2xm2 default-scheduler
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:42 +0000 UTC Normal StatefulSet.apps init-deploy-orc SuccessfulCreate create Pod init-deploy-orc-2 in StatefulSet init-deploy-orc successful statefulset-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:43 +0000 UTC Warning Pod init-deploy-orc-2 FailedMount MountVolume.SetUp failed for volume "custom" : failed to sync configmap cache: timed out waiting for the condition kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:44 +0000 UTC Normal Pod init-deploy-orc-2.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:44 +0000 UTC Normal Pod init-deploy-orc-2.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" in 272ms (272ms including waiting). Image size: 115554316 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:44 +0000 UTC Normal Pod init-deploy-orc-2.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:44 +0000 UTC Normal Pod init-deploy-orc-2.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:47 +0000 UTC Normal Pod init-deploy-orc-2.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:47 +0000 UTC Normal Pod init-deploy-orc-2.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 240ms (240ms including waiting). Image size: 72481355 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:47 +0000 UTC Normal Pod init-deploy-orc-2.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:47 +0000 UTC Normal Pod init-deploy-orc-2.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:47 +0000 UTC Normal Pod init-deploy-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:47 +0000 UTC Normal Pod init-deploy-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 239ms (239ms including waiting). Image size: 72481355 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:47 +0000 UTC Normal Pod init-deploy-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:47 +0000 UTC Normal Pod init-deploy-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:59 +0000 UTC Warning Pod init-deploy-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/06/09 12:51:49 Waiting for bootstrap.lock to be deleted 2025/06/09 12:51:56 Waiting for MySQL ready state 2025/06/09 12:51:57 MySQL is ready 2025/06/09 12:51:57 Peers: [3139336432323739.init-deploy-mysql-unready.kuttl-test-still-hare 3466393762396438.init-deploy-mysql-unready.kuttl-test-still-hare] 2025/06/09 12:51:57 FQDN: init-deploy-mysql-1.init-deploy-mysql.kuttl-test-still-hare 2025/06/09 12:51:57 Primary: init-deploy-mysql-0.init-deploy-mysql.kuttl-test-still-hare Replicas: [init-deploy-mysql-1.init-deploy-mysql.kuttl-test-still-hare] 2025/06/09 12:51:57 lookup init-deploy-mysql-1 [10.202.192.25] 2025/06/09 12:51:57 PodIP: 10.202.192.25 2025/06/09 12:51:57 lookup init-deploy-mysql-0.init-deploy-mysql.kuttl-test-still-hare [10.202.193.30] 2025/06/09 12:51:57 PrimaryIP: 10.202.193.30 2025/06/09 12:51:57 Donor: init-deploy-mysql-0.init-deploy-mysql.kuttl-test-still-hare 2025/06/09 12:51:57 Opening connection to 10.202.192.25 2025/06/09 12:51:57 Clone required: true 2025/06/09 12:51:57 Checking if a clone in progress 2025/06/09 12:51:57 Clone in progress: false 2025/06/09 12:51:57 Cloning from init-deploy-mysql-0.init-deploy-mysql.kuttl-test-still-hare 2025/06/09 12:51:59 Clone finished. Restarting container... kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:51:59 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:52:03 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 248ms (248ms including waiting). Image size: 436552282 bytes. kubelet
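The "Startup probe failed" event on init-deploy-mysql-1 above records the operator's bootstrap flow rather than a fault: the probe output shows the replica deciding "Clone required: true", cloning the dataset from the primary (init-deploy-mysql-0), and then restarting the container on purpose, so the subsequent Killing event is expected. The probe that drives this can be read back with a jsonpath filter:

    kubectl -n kuttl-test-still-hare get pod init-deploy-mysql-1 \
        -o 'jsonpath={.spec.containers[?(@.name=="mysql")].startupProbe}'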
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:52:19 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:52:19 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:52:19 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:52:19 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:52:19 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:52:19 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:52:19 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:52:20 +0000 UTC Warning Pod init-deploy-haproxy-2 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:52:20 +0000 UTC Normal Pod init-deploy-haproxy-2 NotTriggerScaleUp pod didn't trigger scale-up: cluster-autoscaler
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:52:20 +0000 UTC Normal StatefulSet.apps init-deploy-haproxy SuccessfulDelete delete Pod init-deploy-haproxy-2 in StatefulSet init-deploy-haproxy successful statefulset-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:52:22 +0000 UTC Warning Pod init-deploy-haproxy-2 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:52:35 +0000 UTC Normal Pod init-deploy-mysql-1 TaintManagerEviction Cancelling deletion of Pod kuttl-test-still-hare/init-deploy-mysql-1 taint-eviction-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:52:35 +0000 UTC Normal StatefulSet.apps init-deploy-mysql SuccessfulDelete delete Pod init-deploy-mysql-1 in StatefulSet init-deploy-mysql successful statefulset-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:52:35 +0000 UTC Normal Pod init-deploy-orc-1 TaintManagerEviction Cancelling deletion of Pod kuttl-test-still-hare/init-deploy-orc-1 taint-eviction-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:52:35 +0000 UTC Normal StatefulSet.apps init-deploy-orc SuccessfulDelete delete Pod init-deploy-orc-1 in StatefulSet init-deploy-orc successful statefulset-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:52:36 +0000 UTC Normal Pod init-deploy-mysql-1 Binding Scheduled Successfully assigned kuttl-test-still-hare/init-deploy-mysql-1 to gke-jen-ps-929-2cac30ca--default-pool-47c33f20-bcrb default-scheduler
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:52:36 +0000 UTC Warning Pod init-deploy-mysql-1 FailedAttachVolume Multi-Attach error for volume "pvc-d6f81f99-1517-4b3d-af77-74ee116f1096" Volume is already exclusively attached to one node and can't be attached to another attachdetach-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:52:36 +0000 UTC Warning Pod init-deploy-orc-1 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:52:36 +0000 UTC Normal Pod init-deploy-orc-1 NotTriggerScaleUp pod didn't trigger scale-up: cluster-autoscaler
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:52:38 +0000 UTC Warning Pod init-deploy-orc-1 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:53:58 +0000 UTC Warning Pod init-deploy-haproxy-2 Scheduling FailedScheduling 0/2 nodes are available: 2 node(s) didn't match pod anti-affinity rules. preemption: 0/2 nodes are available: 2 No preemption victims found for incoming pod. default-scheduler
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:53:58 +0000 UTC Warning Pod init-deploy-orc-1 Scheduling FailedScheduling 0/2 nodes are available: 2 node(s) didn't match pod anti-affinity rules. preemption: 0/2 nodes are available: 2 No preemption victims found for incoming pod. default-scheduler
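The burst of Killing, FailedScheduling, TaintManagerEviction and FailedAttachVolume events above traces one node going not-ready: its pods are stopped and must be rescheduled, but the pod anti-affinity rules allow only one replica per node, and the ReadWriteOnce volume stays attached to the old node until detach completes (hence the Multi-Attach error). Everything clears once the node recovers, as the later events show. Two commands that help untangle this state when it does not self-heal (an illustration; the volume name is taken from the error above):

    kubectl get volumeattachments | grep pvc-d6f81f99-1517-4b3d-af77-74ee116f1096
    kubectl -n kuttl-test-still-hare get pod init-deploy-mysql-1 \
        -o 'jsonpath={.spec.affinity.podAntiAffinity}'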
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:08 +0000 UTC Normal Pod init-deploy-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-still-hare/init-deploy-haproxy-2 to gke-jen-ps-929-2cac30ca--default-pool-47c33f20-0czp default-scheduler
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:08 +0000 UTC Normal Pod init-deploy-orc-1 Binding Scheduled Successfully assigned kuttl-test-still-hare/init-deploy-orc-1 to gke-jen-ps-929-2cac30ca--default-pool-47c33f20-0czp default-scheduler
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:09 +0000 UTC Warning Pod init-deploy-haproxy-2 FailedMount MountVolume.SetUp failed for volume "tls" : failed to sync secret cache: timed out waiting for the condition kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:09 +0000 UTC Warning Pod init-deploy-haproxy-2 FailedMount MountVolume.SetUp failed for volume "users" : failed to sync secret cache: timed out waiting for the condition kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:09 +0000 UTC Normal Pod init-deploy-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-d6f81f99-1517-4b3d-af77-74ee116f1096" attachdetach-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:09 +0000 UTC Warning Pod init-deploy-orc-1 FailedMount MountVolume.SetUp failed for volume "tls" : failed to sync secret cache: timed out waiting for the condition kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:09 +0000 UTC Warning Pod init-deploy-orc-1 FailedMount MountVolume.SetUp failed for volume "users" : failed to sync secret cache: timed out waiting for the condition kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:09 +0000 UTC Warning Pod init-deploy-orc-1 FailedMount MountVolume.SetUp failed for volume "custom" : failed to sync configmap cache: timed out waiting for the condition kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:10 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:10 +0000 UTC Normal Pod init-deploy-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:10 +0000 UTC Normal Pod init-deploy-orc-1.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:11 +0000 UTC Normal Pod init-deploy-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" in 230ms (230ms including waiting). Image size: 115554316 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:11 +0000 UTC Normal Pod init-deploy-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:11 +0000 UTC Normal Pod init-deploy-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:13 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:13 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 252ms (252ms including waiting). Image size: 436552282 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:13 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:13 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:13 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:13 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 241ms (241ms including waiting). Image size: 437250902 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:13 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:14 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:14 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:14 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 206ms (206ms including waiting). Image size: 132962163 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:14 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:14 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:17 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" in 7.01s (7.214s including waiting). Image size: 115554316 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:17 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:17 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:17 +0000 UTC Normal Pod init-deploy-orc-1.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" in 4.446s (7.114s including waiting). Image size: 115554316 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:17 +0000 UTC Normal Pod init-deploy-orc-1.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:17 +0000 UTC Normal Pod init-deploy-orc-1.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:27 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:27 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:29 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 2.09s (2.09s including waiting). Image size: 72481355 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:29 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:29 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:30 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:30 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 130ms (130ms including waiting). Image size: 72481355 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:30 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:30 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:31 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 3.603s (3.603s including waiting). Image size: 102739877 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:31 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:31 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:31 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:31 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 213ms (213ms including waiting). Image size: 102739877 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:31 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:31 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:40 +0000 UTC Warning Pod init-deploy-orc-1.spec.containers{orchestrator} Unhealthy Liveness probe failed: Get "http://10.202.192.5:3000/api/lb-check": dial tcp 10.202.192.5:3000: connect: connection refused kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:44 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 187ms (187ms including waiting). Image size: 72481355 bytes. kubelet
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:45 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:45 +0000 UTC Normal StatefulSet.apps init-deploy-mysql SuccessfulCreate create Claim datadir-init-deploy-mysql-2 Pod init-deploy-mysql-2 in StatefulSet init-deploy-mysql success statefulset-controller
logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:46 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered.
persistentvolume-controller logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:46 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-still-hare/datadir-init-deploy-mysql-2" pd.csi.storage.gke.io_gke-fd9abc9c3cf449329ef6-a8d5-3196-vm_3aaf5bb9-0814-47c5-b26c-a202903c3743 logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:46 +0000 UTC Normal StatefulSet.apps init-deploy-mysql SuccessfulCreate create Pod init-deploy-mysql-2 in StatefulSet init-deploy-mysql successful statefulset-controller logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:49 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-934ffe9b-c8f6-4351-a231-3c4803cc47b1 pd.csi.storage.gke.io_gke-fd9abc9c3cf449329ef6-a8d5-3196-vm_3aaf5bb9-0814-47c5-b26c-a202903c3743 logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:50 +0000 UTC Normal Pod init-deploy-mysql-2 Binding Scheduled Successfully assigned kuttl-test-still-hare/init-deploy-mysql-2 to gke-jen-ps-929-2cac30ca--default-pool-47c33f20-0czp default-scheduler logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:57 +0000 UTC Normal Pod init-deploy-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-934ffe9b-c8f6-4351-a231-3c4803cc47b1" attachdetach-controller logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:59 +0000 UTC Normal Pod init-deploy-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:59 +0000 UTC Normal Pod init-deploy-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-929-2cac30ca" in 145ms (145ms including waiting). Image size: 115554316 bytes. kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:59 +0000 UTC Normal Pod init-deploy-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:54:59 +0000 UTC Normal Pod init-deploy-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:55:01 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:55:25 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 24.103s (24.103s including waiting). Image size: 436552282 bytes. 
kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:55:26 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:55:26 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:55:26 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:56:00 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 33.964s (33.964s including waiting). Image size: 437250902 bytes. kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:56:00 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:56:00 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:56:00 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:56:03 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 3.585s (3.585s including waiting). Image size: 132962163 bytes. kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:56:03 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:56:03 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:56:10 +0000 UTC Warning Pod init-deploy-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/06/09 12:56:08 Waiting for MySQL ready state 2025/06/09 12:56:08 MySQL is ready 2025/06/09 12:56:08 Peers: [3466393762396438.init-deploy-mysql-unready.kuttl-test-still-hare 6139373065363232.init-deploy-mysql-unready.kuttl-test-still-hare 6164623462383737.init-deploy-mysql-unready.kuttl-test-still-hare] 2025/06/09 12:56:08 FQDN: init-deploy-mysql-2.init-deploy-mysql.kuttl-test-still-hare 2025/06/09 12:56:08 Primary: init-deploy-mysql-0.init-deploy-mysql.kuttl-test-still-hare Replicas: [init-deploy-mysql-1.init-deploy-mysql.kuttl-test-still-hare init-deploy-mysql-2.init-deploy-mysql.kuttl-test-still-hare] 2025/06/09 12:56:08 lookup init-deploy-mysql-2 [10.202.192.6] 2025/06/09 12:56:08 PodIP: 10.202.192.6 2025/06/09 12:56:08 lookup init-deploy-mysql-0.init-deploy-mysql.kuttl-test-still-hare [10.202.193.30] 2025/06/09 12:56:08 PrimaryIP: 10.202.193.30 2025/06/09 12:56:08 Donor: init-deploy-mysql-1.init-deploy-mysql.kuttl-test-still-hare 2025/06/09 12:56:08 Opening connection to 10.202.192.6 2025/06/09 12:56:08 Clone required: true 2025/06/09 12:56:08 Checking if a clone in progress 2025/06/09 12:56:08 Clone in progress: false 2025/06/09 12:56:08 Cloning from init-deploy-mysql-1.init-deploy-mysql.kuttl-test-still-hare 2025/06/09 12:56:10 Clone finished. Restarting container... 
kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:56:10 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:56:19 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 137ms (137ms including waiting). Image size: 436552282 bytes. kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:57:24 +0000 UTC Warning PerconaServerMySQL.ps.percona.com init-deploy AsyncReplicationNotReady init-deploy-mysql-2: [not_replicating] ps-controller logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:28 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:28 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:28 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:28 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:28 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:28 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:28 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:28 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:28 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:28 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:28 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:28 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:28 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:28 +0000 UTC Normal Pod init-deploy-orc-2.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:28 +0000 UTC Normal Pod init-deploy-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:29 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:29 +0000 UTC Normal Pod 
init-deploy-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:29 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:29 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:29 +0000 UTC Normal Pod init-deploy-orc-0.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:29 +0000 UTC Normal Pod init-deploy-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:30 +0000 UTC Warning Pod init-deploy-mysql-1.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/06/09 12:58:30 MySQL state is not ready... kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:31 +0000 UTC Warning Pod init-deploy-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/06/09 12:58:31 MySQL state is not ready... kubelet logger.go:42: 12:58:36 | init-deploy | 2025-06-09 12:58:36 +0000 UTC Warning Pod init-deploy-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/06/09 12:58:36 MySQL state is not ready... kubelet logger.go:42: 12:58:36 | init-deploy | Deleting namespace: kuttl-test-still-hare === NAME kuttl harness.go:407: run tests finished harness.go:515: cleaning up harness.go:572: removing temp folder: "" --- PASS: kuttl (552.55s) --- PASS: kuttl/harness (0.00s) --- PASS: kuttl/harness/init-deploy (552.11s) PASS