=== RUN kuttl
harness.go:464: starting setup
harness.go:255: running tests using configured kubeconfig.
harness.go:278: Successful connection to cluster at: https://34.59.59.5
harness.go:363: running tests
harness.go:75: going to run test suite with timeout of 180 seconds for each step
harness.go:375: testsuite: e2e-tests/tests has 34 tests
=== RUN kuttl/harness
=== RUN kuttl/harness/init-deploy
=== PAUSE kuttl/harness/init-deploy
=== CONT kuttl/harness/init-deploy
logger.go:42: 02:56:05 | init-deploy | Creating namespace: kuttl-test-welcome-marmoset
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | starting test step 0-deploy-operator
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
init_temp_dir # do this only in the first TestStep
deploy_operator
deploy_non_tls_cluster_secrets
deploy_tls_cluster_secrets
deploy_client]
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | + source ../../functions
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ realpath ../../..
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | ++++ pwd
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/tests/init-deploy
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | ++ test_name=init-deploy
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/vars.sh
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/conf
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/conf
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/init-deploy
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/init-deploy
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ export GIT_BRANCH=PR-930
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ GIT_BRANCH=PR-930
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ export VERSION=PR-930-3aa50acc
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ VERSION=PR-930-3aa50acc
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-930-3aa50acc
logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++
IMAGE=perconalab/percona-server-mysql-operator:PR-930-3aa50acc logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ export CERT_MANAGER_VER=1.17.2 logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ CERT_MANAGER_VER=1.17.2 logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ export MINIO_VER=5.4.0 logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ MINIO_VER=5.4.0 logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | ++++ which gdate logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-930/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | ++++ which date logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ date=/usr/bin/date logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ oc get projects logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ : logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ kubectl get nodes logger.go:42: 02:56:05 | init-deploy/0-deploy-operator | +++ grep '^minikube' logger.go:42: 02:56:06 | 
init-deploy/0-deploy-operator | ++ oc get projects logger.go:42: 02:56:06 | init-deploy/0-deploy-operator | + init_temp_dir logger.go:42: 02:56:06 | init-deploy/0-deploy-operator | + rm -rf /tmp/kuttl/ps/init-deploy logger.go:42: 02:56:06 | init-deploy/0-deploy-operator | + mkdir -p /tmp/kuttl/ps/init-deploy logger.go:42: 02:56:06 | init-deploy/0-deploy-operator | + deploy_operator logger.go:42: 02:56:06 | init-deploy/0-deploy-operator | + destroy_operator logger.go:42: 02:56:06 | init-deploy/0-deploy-operator | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0 logger.go:42: 02:56:06 | init-deploy/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 02:56:06 | init-deploy/0-deploy-operator | Error from server (NotFound): deployments.apps "percona-server-mysql-operator" not found logger.go:42: 02:56:06 | init-deploy/0-deploy-operator | + true logger.go:42: 02:56:06 | init-deploy/0-deploy-operator | + [[ -n ps-operator ]] logger.go:42: 02:56:06 | init-deploy/0-deploy-operator | + kubectl delete namespace ps-operator --force --grace-period=0 logger.go:42: 02:56:06 | init-deploy/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 02:56:06 | init-deploy/0-deploy-operator | Error from server (NotFound): namespaces "ps-operator" not found logger.go:42: 02:56:06 | init-deploy/0-deploy-operator | + true logger.go:42: 02:56:06 | init-deploy/0-deploy-operator | + [[ -n ps-operator ]] logger.go:42: 02:56:06 | init-deploy/0-deploy-operator | + create_namespace ps-operator logger.go:42: 02:56:06 | init-deploy/0-deploy-operator | + local namespace=ps-operator logger.go:42: 02:56:06 | init-deploy/0-deploy-operator | + [[ -n '' ]] logger.go:42: 02:56:06 | init-deploy/0-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found logger.go:42: 02:56:07 | init-deploy/0-deploy-operator | + kubectl wait --for=delete namespace ps-operator logger.go:42: 02:56:07 | init-deploy/0-deploy-operator | + kubectl create namespace ps-operator logger.go:42: 02:56:08 | init-deploy/0-deploy-operator | namespace/ps-operator created logger.go:42: 02:56:08 | init-deploy/0-deploy-operator | + kubectl -n ps-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy/crd.yaml logger.go:42: 02:56:08 | init-deploy/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied logger.go:42: 02:56:09 | init-deploy/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied logger.go:42: 02:56:10 | init-deploy/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied logger.go:42: 02:56:10 | init-deploy/0-deploy-operator | + '[' -n ps-operator ']' logger.go:42: 02:56:10 | init-deploy/0-deploy-operator | + kubectl -n ps-operator apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy/cw-rbac.yaml logger.go:42: 02:56:11 | init-deploy/0-deploy-operator | serviceaccount/percona-server-mysql-operator created logger.go:42: 02:56:11 | init-deploy/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection 
created logger.go:42: 02:56:11 | init-deploy/0-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged logger.go:42: 02:56:11 | init-deploy/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created logger.go:42: 02:56:12 | init-deploy/0-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged logger.go:42: 02:56:12 | init-deploy/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' logger.go:42: 02:56:12 | init-deploy/0-deploy-operator | + kubectl -n ps-operator apply -f - logger.go:42: 02:56:12 | init-deploy/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"' logger.go:42: 02:56:12 | init-deploy/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-930-3aa50acc logger.go:42: 02:56:12 | init-deploy/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-930-3aa50acc"' /mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy/cw-operator.yaml logger.go:42: 02:56:13 | init-deploy/0-deploy-operator | configmap/percona-server-mysql-operator-config created logger.go:42: 02:56:13 | init-deploy/0-deploy-operator | deployment.apps/percona-server-mysql-operator created logger.go:42: 02:56:13 | init-deploy/0-deploy-operator | + deploy_non_tls_cluster_secrets logger.go:42: 02:56:13 | init-deploy/0-deploy-operator | + kubectl -n kuttl-test-welcome-marmoset apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/conf/secrets.yaml logger.go:42: 02:56:14 | init-deploy/0-deploy-operator | secret/test-secrets created logger.go:42: 02:56:14 | init-deploy/0-deploy-operator | + deploy_tls_cluster_secrets logger.go:42: 02:56:14 | init-deploy/0-deploy-operator | + kubectl -n kuttl-test-welcome-marmoset apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/conf/ssl-secret.yaml logger.go:42: 02:56:15 | init-deploy/0-deploy-operator | secret/test-ssl created logger.go:42: 02:56:15 | init-deploy/0-deploy-operator | + deploy_client logger.go:42: 02:56:15 | init-deploy/0-deploy-operator | + kubectl -n kuttl-test-welcome-marmoset apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/conf/client.yaml logger.go:42: 02:56:16 | init-deploy/0-deploy-operator | pod/mysql-client created logger.go:42: 02:56:16 | init-deploy/0-deploy-operator | ResourceQuota:kuttl-test-welcome-marmoset/init-deploy-resource-quota created logger.go:42: 02:56:17 | init-deploy/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 02:56:17 | init-deploy/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 02:56:17 | init-deploy/0-deploy-operator | ASSERT FAIL Resource(s) not found. 
logger.go:42: 02:56:19 | init-deploy/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 02:56:19 | init-deploy/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 02:56:19 | init-deploy/0-deploy-operator | ASSERT FAIL Resource(s) not found. logger.go:42: 02:56:21 | init-deploy/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 02:56:21 | init-deploy/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 02:56:21 | init-deploy/0-deploy-operator | ASSERT FAIL Resource(s) not found. logger.go:42: 02:56:22 | init-deploy/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 02:56:22 | init-deploy/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 02:56:23 | init-deploy/0-deploy-operator | ASSERT FAIL Resource(s) not found. logger.go:42: 02:56:24 | init-deploy/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 02:56:24 | init-deploy/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 02:56:25 | init-deploy/0-deploy-operator | INFO Found 1 resource(s). 
logger.go:42: 02:56:25 | init-deploy/0-deploy-operator | NAME NAMESPACE COL0 logger.go:42: 02:56:25 | init-deploy/0-deploy-operator | percona-server-mysql-operator ps-operator 1 logger.go:42: 02:56:25 | init-deploy/0-deploy-operator | ASSERT PASS logger.go:42: 02:56:25 | init-deploy/0-deploy-operator | test step completed 0-deploy-operator logger.go:42: 02:56:25 | init-deploy/1-create-cluster | starting test step 1-create-cluster logger.go:42: 02:56:25 | init-deploy/1-create-cluster | running command: [sh -c set -o errexit set -o xtrace source ../../functions get_cr \ | yq eval '.spec.mysql.clusterType="async"' - \ | yq eval '.spec.mysql.size=3' - \ | yq eval '.spec.proxy.haproxy.enabled=true' - \ | yq eval '.spec.proxy.haproxy.size=3' - \ | yq eval '.spec.orchestrator.enabled=true' - \ | yq eval '.spec.mysql.resources.limits.cpu="500m"' - \ | yq eval '.spec.mysql.resources.limits.memory="1G"' - \ | yq eval '.spec.mysql.resources.requests.cpu="400m"' - \ | yq eval '.spec.mysql.resources.requests.memory="1G"' - \ | yq eval '.spec.backup.resources.limits.cpu="200m"' - \ | yq eval '.spec.backup.resources.limits.memory="256Mi"' - \ | yq eval '.spec.backup.resources.requests.cpu="100m"' - \ | yq eval '.spec.backup.resources.requests.memory="128Mi"' - \ | yq eval '.spec.toolkit.resources.limits.cpu="150m"' - \ | yq eval '.spec.toolkit.resources.limits.memory="256Mi"' - \ | yq eval '.spec.toolkit.resources.requests.cpu="100m"' - \ | yq eval '.spec.toolkit.resources.requests.memory="128Mi"' - \ | yq eval '.spec.proxy.haproxy.resources.limits.cpu="600m"' - \ | yq eval '.spec.proxy.haproxy.resources.limits.memory="512Mi"' - \ | yq eval '.spec.proxy.haproxy.resources.requests.cpu="500m"' - \ | yq eval '.spec.proxy.haproxy.resources.requests.memory="256Mi"' - \ | yq eval '.spec.orchestrator.resources.limits.cpu="200m"' - \ | yq eval '.spec.orchestrator.resources.limits.memory="256Mi"' - \ | yq eval '.spec.orchestrator.resources.requests.cpu="100m"' - \ | yq eval '.spec.orchestrator.resources.requests.memory="128Mi"' - \ | yq eval '.spec.orchestrator.size=3' - \ | kubectl -n "${NAMESPACE}" apply -f -] logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + source ../../functions logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ realpath ../../.. 
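The repeated "ASSERT FAIL Resource(s) not found." lines in step 0 are expected: kuttl keeps re-running the kubectl assert command (from the kubectl-assert plugin; assert is not a built-in kubectl subcommand) every couple of seconds until the operator deployment reports a ready replica or the 180-second step timeout expires. Outside the harness, roughly the same wait can be expressed with stock kubectl; a minimal sketch, not part of the test itself:

    kubectl -n ps-operator rollout status deployment/percona-server-mysql-operator --timeout=180s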
logger.go:42: 02:56:25 | init-deploy/1-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 02:56:25 | init-deploy/1-create-cluster | ++++ pwd logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/tests/init-deploy logger.go:42: 02:56:25 | init-deploy/1-create-cluster | ++ test_name=init-deploy logger.go:42: 02:56:25 | init-deploy/1-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/vars.sh logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/conf logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/conf logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/init-deploy logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/init-deploy logger.go:42: 02:56:25 | init-deploy/1-create-cluster | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ export GIT_BRANCH=PR-930 logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ GIT_BRANCH=PR-930 logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ export VERSION=PR-930-3aa50acc logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ VERSION=PR-930-3aa50acc logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-930-3aa50acc logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-930-3aa50acc logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 
02:56:25 | init-deploy/1-create-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ export CERT_MANAGER_VER=1.17.2 logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ CERT_MANAGER_VER=1.17.2 logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ export MINIO_VER=5.4.0 logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ MINIO_VER=5.4.0 logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 02:56:25 | init-deploy/1-create-cluster | ++++ which gdate logger.go:42: 02:56:25 | init-deploy/1-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-930/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 02:56:25 | init-deploy/1-create-cluster | ++++ which date logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ date=/usr/bin/date logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ oc get projects logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ : logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ kubectl get nodes logger.go:42: 02:56:25 | init-deploy/1-create-cluster | +++ grep '^minikube' logger.go:42: 02:56:25 | init-deploy/1-create-cluster | ++ oc get projects logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval .spec.mysql.size=3 - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval .spec.proxy.haproxy.enabled=true - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + get_cr logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + local name_suffix= logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.backup.resources.requests.cpu="100m"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.mysql.resources.requests.cpu="400m"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.toolkit.resources.limits.cpu="150m"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.mysql.resources.limits.memory="1G"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.mysql.resources.limits.cpu="500m"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval .spec.orchestrator.enabled=true - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval .spec.proxy.haproxy.size=3 - logger.go:42: 02:56:25 | 
init-deploy/1-create-cluster | + yq eval '.spec.backup.resources.limits.cpu="200m"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.toolkit.resources.limits.memory="256Mi"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.toolkit.resources.requests.cpu="100m"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.backup.resources.requests.memory="128Mi"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.proxy.haproxy.resources.limits.memory="512Mi"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.secretsName="test-secrets"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.mysql.resources.requests.memory="1G"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.backup.resources.limits.memory="256Mi"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.orchestrator.resources.limits.cpu="200m"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval .spec.orchestrator.enabled=true - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + '[' -n '' ']' logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.proxy.haproxy.resources.requests.cpu="500m"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.proxy.haproxy.resources.requests.memory="256Mi"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.orchestrator.resources.limits.memory="256Mi"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.orchestrator.resources.requests.cpu="100m"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.toolkit.resources.requests.memory="128Mi"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.proxy.haproxy.resources.limits.cpu="600m"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:3-dev-latest logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:3-dev-latest"' - 
logger.go:42: 02:56:25 | init-deploy/1-create-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-930-3aa50acc logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-930-3aa50acc"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.orchestrator.resources.requests.memory="128Mi"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval .spec.orchestrator.size=3 - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + kubectl -n kuttl-test-welcome-marmoset apply -f - logger.go:42: 02:56:25 | init-deploy/1-create-cluster | ++ printf '.metadata.name="%s"' init-deploy logger.go:42: 02:56:25 | init-deploy/1-create-cluster | + yq eval '.metadata.name="init-deploy"' /mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy/cr.yaml logger.go:42: 02:56:26 | init-deploy/1-create-cluster | perconaservermysql.ps.percona.com/init-deploy created logger.go:42: 03:00:01 | init-deploy/1-create-cluster | test step completed 1-create-cluster logger.go:42: 03:00:01 | init-deploy/2-write-data | starting test step 2-write-data logger.go:42: 03:00:01 | init-deploy/2-write-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_mysql \ "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" \ "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password" run_mysql \ "INSERT myDB.myTable (id) VALUES (100500)" \ "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password"] logger.go:42: 03:00:01 | init-deploy/2-write-data | + source ../../functions logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ realpath ../../.. 
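Step 1 assembles the PerconaServerMySQL custom resource by piping deploy/cr.yaml through a long chain of single-field yq eval processes (get_cr plus the overrides listed in the step command). The same overrides could be chained in a single yq v4 expression; a minimal sketch covering only a few of the fields set above, not the harness's actual code path:

    yq eval '
      .metadata.name = "init-deploy" |
      .spec.mysql.clusterType = "async" |
      .spec.mysql.size = 3 |
      .spec.proxy.haproxy.enabled = true |
      .spec.proxy.haproxy.size = 3 |
      .spec.orchestrator.enabled = true |
      .spec.orchestrator.size = 3
    ' "${DEPLOY_DIR}/cr.yaml" | kubectl -n "${NAMESPACE}" apply -f -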
logger.go:42: 03:00:01 | init-deploy/2-write-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 03:00:01 | init-deploy/2-write-data | ++++ pwd logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/tests/init-deploy logger.go:42: 03:00:01 | init-deploy/2-write-data | ++ test_name=init-deploy logger.go:42: 03:00:01 | init-deploy/2-write-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/vars.sh logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/conf logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/conf logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ export TEMP_DIR=/tmp/kuttl/ps/init-deploy logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ TEMP_DIR=/tmp/kuttl/ps/init-deploy logger.go:42: 03:00:01 | init-deploy/2-write-data | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ export GIT_BRANCH=PR-930 logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ GIT_BRANCH=PR-930 logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ export VERSION=PR-930-3aa50acc logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ VERSION=PR-930-3aa50acc logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-930-3aa50acc logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-930-3aa50acc logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit 
logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ export CERT_MANAGER_VER=1.17.2 logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ CERT_MANAGER_VER=1.17.2 logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ export MINIO_VER=5.4.0 logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ MINIO_VER=5.4.0 logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 03:00:01 | init-deploy/2-write-data | ++++ which gdate logger.go:42: 03:00:01 | init-deploy/2-write-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-930/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 03:00:01 | init-deploy/2-write-data | ++++ which date logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ date=/usr/bin/date logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ oc get projects logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ : logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ kubectl get nodes logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ grep '^minikube' logger.go:42: 03:00:01 | init-deploy/2-write-data | ++ oc get projects logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ get_cluster_name logger.go:42: 03:00:01 | init-deploy/2-write-data | +++ kubectl -n kuttl-test-welcome-marmoset get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 03:00:02 | init-deploy/2-write-data | ++ get_haproxy_svc init-deploy logger.go:42: 03:00:02 | init-deploy/2-write-data | ++ local cluster=init-deploy logger.go:42: 03:00:02 | init-deploy/2-write-data | ++ echo init-deploy-haproxy logger.go:42: 03:00:02 | init-deploy/2-write-data | + run_mysql 'CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' '-h init-deploy-haproxy -uroot -proot_password' logger.go:42: 03:00:02 | init-deploy/2-write-data | + local 'command=CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' logger.go:42: 03:00:02 | init-deploy/2-write-data | + local 'uri=-h init-deploy-haproxy -uroot -proot_password' logger.go:42: 03:00:02 | init-deploy/2-write-data | + local pod= logger.go:42: 03:00:02 | init-deploy/2-write-data | ++ get_client_pod logger.go:42: 03:00:02 | init-deploy/2-write-data | ++ kubectl -n kuttl-test-welcome-marmoset get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 03:00:02 | init-deploy/2-write-data | + client_pod=mysql-client logger.go:42: 
03:00:02 | init-deploy/2-write-data | + wait_pod mysql-client logger.go:42: 03:00:02 | init-deploy/2-write-data | + local pod=mysql-client logger.go:42: 03:00:02 | init-deploy/2-write-data | + set +o xtrace logger.go:42: 03:00:02 | init-deploy/2-write-data | mysql-clienttrue logger.go:42: 03:00:02 | init-deploy/2-write-data | + sed -e 's/mysql: //' logger.go:42: 03:00:02 | init-deploy/2-write-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 03:00:02 | init-deploy/2-write-data | + kubectl -n kuttl-test-welcome-marmoset exec mysql-client -- bash -c 'printf '\''%s\n'\'' "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" | mysql -sN -h init-deploy-haproxy -uroot -proot_password' logger.go:42: 03:00:03 | init-deploy/2-write-data | + : logger.go:42: 03:00:03 | init-deploy/2-write-data | +++ get_cluster_name logger.go:42: 03:00:03 | init-deploy/2-write-data | +++ kubectl -n kuttl-test-welcome-marmoset get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 03:00:04 | init-deploy/2-write-data | ++ get_haproxy_svc init-deploy logger.go:42: 03:00:04 | init-deploy/2-write-data | ++ local cluster=init-deploy logger.go:42: 03:00:04 | init-deploy/2-write-data | ++ echo init-deploy-haproxy logger.go:42: 03:00:04 | init-deploy/2-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100500)' '-h init-deploy-haproxy -uroot -proot_password' logger.go:42: 03:00:04 | init-deploy/2-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100500)' logger.go:42: 03:00:04 | init-deploy/2-write-data | + local 'uri=-h init-deploy-haproxy -uroot -proot_password' logger.go:42: 03:00:04 | init-deploy/2-write-data | + local pod= logger.go:42: 03:00:04 | init-deploy/2-write-data | ++ get_client_pod logger.go:42: 03:00:04 | init-deploy/2-write-data | ++ kubectl -n kuttl-test-welcome-marmoset get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 03:00:04 | init-deploy/2-write-data | + client_pod=mysql-client logger.go:42: 03:00:04 | init-deploy/2-write-data | + wait_pod mysql-client logger.go:42: 03:00:04 | init-deploy/2-write-data | + local pod=mysql-client logger.go:42: 03:00:04 | init-deploy/2-write-data | + set +o xtrace logger.go:42: 03:00:05 | init-deploy/2-write-data | mysql-clienttrue logger.go:42: 03:00:05 | init-deploy/2-write-data | + kubectl -n kuttl-test-welcome-marmoset exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100500)" | mysql -sN -h init-deploy-haproxy -uroot -proot_password' logger.go:42: 03:00:05 | init-deploy/2-write-data | + sed -e 's/mysql: //' logger.go:42: 03:00:05 | init-deploy/2-write-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 03:00:05 | init-deploy/2-write-data | + : [controller-runtime] log.SetLogger(...) was never called; logs will not be displayed. 
Detected at:
> goroutine 41 [running]:
> runtime/debug.Stack()
>     /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/runtime/debug/stack.go:24 +0x5e
> sigs.k8s.io/controller-runtime/pkg/log.eventuallyFulfillRoot()
>     /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/log.go:60 +0xcd
> sigs.k8s.io/controller-runtime/pkg/log.(*delegatingLogSink).WithName(0xc0002d9c00, {0x184a055, 0x14})
>     /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/log/deleg.go:147 +0x3e
> github.com/go-logr/logr.Logger.WithName({{0x1acb7d8, 0xc0002d9c00}, 0x0}, {0x184a055?, 0xc0003b1f80?})
>     /home/mowsiany/go/pkg/mod/github.com/go-logr/logr@v1.2.4/logr.go:336 +0x36
> sigs.k8s.io/controller-runtime/pkg/client.newClient(0x131ead3?, {0x0, 0xc000448a10, {0x1accd90, 0xc0002d8300}, 0x0, {0x0, 0x0}, 0x0})
>     /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:122 +0xf1
> sigs.k8s.io/controller-runtime/pkg/client.New(0xc00041ed88?, {0x0, 0xc000448a10, {0x1accd90, 0xc0002d8300}, 0x0, {0x0, 0x0}, 0x0})
>     /home/mowsiany/go/pkg/mod/sigs.k8s.io/controller-runtime@v0.16.3/pkg/client/client.go:103 +0x7d
> github.com/kudobuilder/kuttl/pkg/test/utils.NewRetryClient(0xc00041ed88, {0x0, 0xc000448a10, {0x1accd90, 0xc0002d8300}, 0x0, {0x0, 0x0}, 0x0})
>     /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/utils/kubernetes.go:177 +0x127
> github.com/kudobuilder/kuttl/pkg/test.(*Harness).Client(0xc00036d208, 0x76?)
>     /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:323 +0x18e
> github.com/kudobuilder/kuttl/pkg/test.(*Step).Create(0xc000351790, 0xc00037a1a0, {0xc0005c2660, 0x1b})
>     /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:177 +0x63
> github.com/kudobuilder/kuttl/pkg/test.(*Step).Run(0xc000351790, 0xc00037a1a0, {0xc0005c2660, 0x1b})
>     /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/step.go:457 +0x24a
> github.com/kudobuilder/kuttl/pkg/test.(*Case).Run(0xc000267680, 0xc00037a1a0, 0xc00061f4d0)
>     /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/case.go:373 +0xaeb
> github.com/kudobuilder/kuttl/pkg/test.(*Harness).RunTests.func1.1(0xc00037a1a0)
>     /home/mowsiany/go/src/github.com/kudobuilder/kuttl/pkg/test/harness.go:401 +0x12e
> testing.tRunner(0xc00037a1a0, 0xc000110b10)
>     /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1689 +0xfb
> created by testing.(*T).Run in goroutine 40
>     /nix/store/wkbckbd30nlhq4dxzg64q6y4vm1xx4fk-go-1.22.1/share/go/src/testing/testing.go:1742 +0x390
logger.go:42: 03:00:05 | init-deploy/2-write-data | test step completed 2-write-data
logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | starting test step 3-read-from-primary
logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
data=$(run_mysql "SELECT * FROM myDB.myTable" "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password")
kubectl create configmap -n "${NAMESPACE}" 03-read-from-primary --from-literal=data="${data}"]
logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | + source ../../functions
logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ realpath ../../..
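The [controller-runtime] log.SetLogger warning and the stack trace above come from the kuttl test binary itself (note the kudobuilder/kuttl frames), not from the operator or the SQL commands, and step 2 still completes. The run_mysql helper from e2e-tests/functions that the step uses reduces to an exec into the mysql-client pod which pipes the statement to the HAProxy service; a stripped-down sketch of what the trace shows:

    kubectl -n "${NAMESPACE}" exec mysql-client -- bash -c \
      'printf "%s\n" "INSERT myDB.myTable (id) VALUES (100500)" | mysql -sN -h init-deploy-haproxy -uroot -proot_password'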
logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | ++++ pwd logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/tests/init-deploy logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | ++ test_name=init-deploy logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/vars.sh logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/conf logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/conf logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ export TEMP_DIR=/tmp/kuttl/ps/init-deploy logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ TEMP_DIR=/tmp/kuttl/ps/init-deploy logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ export GIT_BRANCH=PR-930 logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ GIT_BRANCH=PR-930 logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ export VERSION=PR-930-3aa50acc logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ VERSION=PR-930-3aa50acc logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-930-3aa50acc logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-930-3aa50acc logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | 
+++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ export CERT_MANAGER_VER=1.17.2 logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ CERT_MANAGER_VER=1.17.2 logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ export MINIO_VER=5.4.0 logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ MINIO_VER=5.4.0 logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | ++++ which gdate logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-930/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | ++++ which date logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ date=/usr/bin/date logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ oc get projects logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ : logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ kubectl get nodes logger.go:42: 03:00:05 | init-deploy/3-read-from-primary | +++ grep '^minikube' logger.go:42: 03:00:06 | init-deploy/3-read-from-primary | ++ oc get projects logger.go:42: 03:00:06 | init-deploy/3-read-from-primary | ++++ get_cluster_name logger.go:42: 03:00:06 | init-deploy/3-read-from-primary | ++++ kubectl -n kuttl-test-welcome-marmoset get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 03:00:06 | init-deploy/3-read-from-primary | +++ get_haproxy_svc init-deploy logger.go:42: 03:00:06 | init-deploy/3-read-from-primary | +++ local cluster=init-deploy logger.go:42: 03:00:06 | init-deploy/3-read-from-primary | +++ echo init-deploy-haproxy logger.go:42: 03:00:06 | init-deploy/3-read-from-primary | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h init-deploy-haproxy -uroot -proot_password' logger.go:42: 03:00:06 | init-deploy/3-read-from-primary | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 03:00:06 | init-deploy/3-read-from-primary | ++ local 'uri=-h init-deploy-haproxy -uroot -proot_password' logger.go:42: 03:00:06 | init-deploy/3-read-from-primary | ++ local pod= logger.go:42: 03:00:06 | 
init-deploy/3-read-from-primary | +++ get_client_pod logger.go:42: 03:00:06 | init-deploy/3-read-from-primary | +++ kubectl -n kuttl-test-welcome-marmoset get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 03:00:07 | init-deploy/3-read-from-primary | ++ client_pod=mysql-client logger.go:42: 03:00:07 | init-deploy/3-read-from-primary | ++ wait_pod mysql-client logger.go:42: 03:00:07 | init-deploy/3-read-from-primary | ++ local pod=mysql-client logger.go:42: 03:00:07 | init-deploy/3-read-from-primary | ++ set +o xtrace logger.go:42: 03:00:07 | init-deploy/3-read-from-primary | mysql-clienttrue logger.go:42: 03:00:07 | init-deploy/3-read-from-primary | ++ kubectl -n kuttl-test-welcome-marmoset exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h init-deploy-haproxy -uroot -proot_password' logger.go:42: 03:00:07 | init-deploy/3-read-from-primary | ++ sed -e 's/mysql: //' logger.go:42: 03:00:07 | init-deploy/3-read-from-primary | ++ grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 03:00:08 | init-deploy/3-read-from-primary | + data=100500 logger.go:42: 03:00:08 | init-deploy/3-read-from-primary | + kubectl create configmap -n kuttl-test-welcome-marmoset 03-read-from-primary --from-literal=data=100500 logger.go:42: 03:00:08 | init-deploy/3-read-from-primary | configmap/03-read-from-primary created logger.go:42: 03:00:09 | init-deploy/3-read-from-primary | test step completed 3-read-from-primary logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | starting test step 4-read-from-replicas logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | running command: [sh -c set -o errexit set -o xtrace source ../../functions args='' size=$(kubectl -n ${NAMESPACE} get ps $(get_cluster_name) -o jsonpath='{.spec.mysql.size}') for i in $(seq 0 $((size - 1))); do host=$(get_mysql_headless_fqdn $(get_cluster_name) $i) data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${host} -uroot -proot_password") args="${args} --from-literal=${host}=${data}" done kubectl create configmap -n "${NAMESPACE}" 04-read-from-replicas ${args}] logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | + source ../../functions logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ realpath ../../.. 
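Step 3 records the value read through HAProxy in a ConfigMap so kuttl can assert on it declaratively (presumably against an assert manifest shipped with the test). To inspect the captured value by hand; a minimal sketch:

    kubectl -n "${NAMESPACE}" get configmap 03-read-from-primary -o jsonpath='{.data.data}'
    # expected output in this run: 100500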
logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | ++++ pwd logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/tests/init-deploy logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | ++ test_name=init-deploy logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/vars.sh logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/conf logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/conf logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ export TEMP_DIR=/tmp/kuttl/ps/init-deploy logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ TEMP_DIR=/tmp/kuttl/ps/init-deploy logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ export GIT_BRANCH=PR-930 logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ GIT_BRANCH=PR-930 logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ export VERSION=PR-930-3aa50acc logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ VERSION=PR-930-3aa50acc logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-930-3aa50acc logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-930-3aa50acc logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:00:09 | 
init-deploy/4-read-from-replicas | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ export CERT_MANAGER_VER=1.17.2 logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ CERT_MANAGER_VER=1.17.2 logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ export MINIO_VER=5.4.0 logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ MINIO_VER=5.4.0 logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | ++++ which gdate logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-930/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | ++++ which date logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ date=/usr/bin/date logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ oc get projects logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ : logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ kubectl get nodes logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ grep '^minikube' logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | ++ oc get projects logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | + args= logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ get_cluster_name logger.go:42: 03:00:09 | init-deploy/4-read-from-replicas | +++ kubectl -n kuttl-test-welcome-marmoset get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 03:00:10 | init-deploy/4-read-from-replicas | ++ kubectl -n kuttl-test-welcome-marmoset get ps init-deploy -o 'jsonpath={.spec.mysql.size}' logger.go:42: 03:00:10 | init-deploy/4-read-from-replicas | + size=3 logger.go:42: 03:00:10 | init-deploy/4-read-from-replicas | ++ seq 0 2 logger.go:42: 03:00:10 | init-deploy/4-read-from-replicas | + for i in '$(seq 0 $((size - 1)))' logger.go:42: 03:00:10 | init-deploy/4-read-from-replicas | +++ get_cluster_name logger.go:42: 03:00:10 | init-deploy/4-read-from-replicas | +++ kubectl -n kuttl-test-welcome-marmoset get ps -o 
'jsonpath={.items[0].metadata.name}' logger.go:42: 03:00:11 | init-deploy/4-read-from-replicas | ++ get_mysql_headless_fqdn init-deploy 0 logger.go:42: 03:00:11 | init-deploy/4-read-from-replicas | ++ local cluster=init-deploy logger.go:42: 03:00:11 | init-deploy/4-read-from-replicas | ++ local index=0 logger.go:42: 03:00:11 | init-deploy/4-read-from-replicas | ++ echo init-deploy-mysql-0.init-deploy-mysql logger.go:42: 03:00:11 | init-deploy/4-read-from-replicas | + host=init-deploy-mysql-0.init-deploy-mysql logger.go:42: 03:00:11 | init-deploy/4-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h init-deploy-mysql-0.init-deploy-mysql -uroot -proot_password' logger.go:42: 03:00:11 | init-deploy/4-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 03:00:11 | init-deploy/4-read-from-replicas | ++ local 'uri=-h init-deploy-mysql-0.init-deploy-mysql -uroot -proot_password' logger.go:42: 03:00:11 | init-deploy/4-read-from-replicas | ++ local pod= logger.go:42: 03:00:11 | init-deploy/4-read-from-replicas | +++ get_client_pod logger.go:42: 03:00:11 | init-deploy/4-read-from-replicas | +++ kubectl -n kuttl-test-welcome-marmoset get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 03:00:11 | init-deploy/4-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 03:00:11 | init-deploy/4-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 03:00:11 | init-deploy/4-read-from-replicas | ++ local pod=mysql-client logger.go:42: 03:00:11 | init-deploy/4-read-from-replicas | ++ set +o xtrace logger.go:42: 03:00:11 | init-deploy/4-read-from-replicas | mysql-clienttrue logger.go:42: 03:00:11 | init-deploy/4-read-from-replicas | ++ kubectl -n kuttl-test-welcome-marmoset exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h init-deploy-mysql-0.init-deploy-mysql -uroot -proot_password' logger.go:42: 03:00:11 | init-deploy/4-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 03:00:11 | init-deploy/4-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
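[editor's note] Judging from the xtrace above, the run_mysql helper from ../../functions expands to roughly the following; this is a sketch reconstructed from the trace, not the suite's exact function (the optional pod argument and wait_pod handling are simplified):

    run_mysql() {
        local command="$1"
        local uri="$2"
        # resolve the client pod by its name=mysql-client label
        local client_pod
        client_pod=$(kubectl -n "${NAMESPACE}" get pods --selector=name=mysql-client \
            -o 'jsonpath={.items[].metadata.name}')
        # run the statement non-interactively and strip the client's
        # "password on the command line" warning from the output
        kubectl -n "${NAMESPACE}" exec "${client_pod}" -- \
            bash -c "printf '%s\n' \"${command}\" | mysql -sN ${uri}" 2>&1 \
            | sed -e 's/mysql: //' \
            | grep -v 'Using a password on the command line interface can be insecure.'
    }

Each loop iteration in step 4 captures this output into data and appends it to the configmap arguments.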
logger.go:42: 03:00:13 | init-deploy/4-read-from-replicas | + data=100500 logger.go:42: 03:00:13 | init-deploy/4-read-from-replicas | + args=' --from-literal=init-deploy-mysql-0.init-deploy-mysql=100500' logger.go:42: 03:00:13 | init-deploy/4-read-from-replicas | + for i in '$(seq 0 $((size - 1)))' logger.go:42: 03:00:13 | init-deploy/4-read-from-replicas | +++ get_cluster_name logger.go:42: 03:00:13 | init-deploy/4-read-from-replicas | +++ kubectl -n kuttl-test-welcome-marmoset get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 03:00:13 | init-deploy/4-read-from-replicas | ++ get_mysql_headless_fqdn init-deploy 1 logger.go:42: 03:00:13 | init-deploy/4-read-from-replicas | ++ local cluster=init-deploy logger.go:42: 03:00:13 | init-deploy/4-read-from-replicas | ++ local index=1 logger.go:42: 03:00:13 | init-deploy/4-read-from-replicas | ++ echo init-deploy-mysql-1.init-deploy-mysql logger.go:42: 03:00:13 | init-deploy/4-read-from-replicas | + host=init-deploy-mysql-1.init-deploy-mysql logger.go:42: 03:00:13 | init-deploy/4-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h init-deploy-mysql-1.init-deploy-mysql -uroot -proot_password' logger.go:42: 03:00:13 | init-deploy/4-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 03:00:13 | init-deploy/4-read-from-replicas | ++ local 'uri=-h init-deploy-mysql-1.init-deploy-mysql -uroot -proot_password' logger.go:42: 03:00:13 | init-deploy/4-read-from-replicas | ++ local pod= logger.go:42: 03:00:13 | init-deploy/4-read-from-replicas | +++ get_client_pod logger.go:42: 03:00:13 | init-deploy/4-read-from-replicas | +++ kubectl -n kuttl-test-welcome-marmoset get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 03:00:13 | init-deploy/4-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 03:00:13 | init-deploy/4-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 03:00:13 | init-deploy/4-read-from-replicas | ++ local pod=mysql-client logger.go:42: 03:00:13 | init-deploy/4-read-from-replicas | ++ set +o xtrace logger.go:42: 03:00:14 | init-deploy/4-read-from-replicas | mysql-clienttrue logger.go:42: 03:00:14 | init-deploy/4-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 03:00:14 | init-deploy/4-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 03:00:14 | init-deploy/4-read-from-replicas | ++ kubectl -n kuttl-test-welcome-marmoset exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h init-deploy-mysql-1.init-deploy-mysql -uroot -proot_password' logger.go:42: 03:00:15 | init-deploy/4-read-from-replicas | + data=100500 logger.go:42: 03:00:15 | init-deploy/4-read-from-replicas | + args=' --from-literal=init-deploy-mysql-0.init-deploy-mysql=100500 --from-literal=init-deploy-mysql-1.init-deploy-mysql=100500' logger.go:42: 03:00:15 | init-deploy/4-read-from-replicas | + for i in '$(seq 0 $((size - 1)))' logger.go:42: 03:00:15 | init-deploy/4-read-from-replicas | +++ get_cluster_name logger.go:42: 03:00:15 | init-deploy/4-read-from-replicas | +++ kubectl -n kuttl-test-welcome-marmoset get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 03:00:15 | init-deploy/4-read-from-replicas | ++ get_mysql_headless_fqdn init-deploy 2 logger.go:42: 03:00:15 | init-deploy/4-read-from-replicas | ++ local cluster=init-deploy logger.go:42: 03:00:15 | init-deploy/4-read-from-replicas | ++ local index=2 logger.go:42: 03:00:15 | init-deploy/4-read-from-replicas | ++ echo init-deploy-mysql-2.init-deploy-mysql logger.go:42: 03:00:15 | init-deploy/4-read-from-replicas | + host=init-deploy-mysql-2.init-deploy-mysql logger.go:42: 03:00:15 | init-deploy/4-read-from-replicas | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h init-deploy-mysql-2.init-deploy-mysql -uroot -proot_password' logger.go:42: 03:00:15 | init-deploy/4-read-from-replicas | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 03:00:15 | init-deploy/4-read-from-replicas | ++ local 'uri=-h init-deploy-mysql-2.init-deploy-mysql -uroot -proot_password' logger.go:42: 03:00:15 | init-deploy/4-read-from-replicas | ++ local pod= logger.go:42: 03:00:15 | init-deploy/4-read-from-replicas | +++ get_client_pod logger.go:42: 03:00:15 | init-deploy/4-read-from-replicas | +++ kubectl -n kuttl-test-welcome-marmoset get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 03:00:16 | init-deploy/4-read-from-replicas | ++ client_pod=mysql-client logger.go:42: 03:00:16 | init-deploy/4-read-from-replicas | ++ wait_pod mysql-client logger.go:42: 03:00:16 | init-deploy/4-read-from-replicas | ++ local pod=mysql-client logger.go:42: 03:00:16 | init-deploy/4-read-from-replicas | ++ set +o xtrace logger.go:42: 03:00:16 | init-deploy/4-read-from-replicas | mysql-clienttrue logger.go:42: 03:00:16 | init-deploy/4-read-from-replicas | ++ kubectl -n kuttl-test-welcome-marmoset exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h init-deploy-mysql-2.init-deploy-mysql -uroot -proot_password' logger.go:42: 03:00:16 | init-deploy/4-read-from-replicas | ++ sed -e 's/mysql: //' logger.go:42: 03:00:16 | init-deploy/4-read-from-replicas | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 03:00:17 | init-deploy/4-read-from-replicas | + data=100500 logger.go:42: 03:00:17 | init-deploy/4-read-from-replicas | + args=' --from-literal=init-deploy-mysql-0.init-deploy-mysql=100500 --from-literal=init-deploy-mysql-1.init-deploy-mysql=100500 --from-literal=init-deploy-mysql-2.init-deploy-mysql=100500' logger.go:42: 03:00:17 | init-deploy/4-read-from-replicas | + kubectl create configmap -n kuttl-test-welcome-marmoset 04-read-from-replicas --from-literal=init-deploy-mysql-0.init-deploy-mysql=100500 --from-literal=init-deploy-mysql-1.init-deploy-mysql=100500 --from-literal=init-deploy-mysql-2.init-deploy-mysql=100500 logger.go:42: 03:00:17 | init-deploy/4-read-from-replicas | configmap/04-read-from-replicas created logger.go:42: 03:00:18 | init-deploy/4-read-from-replicas | test step completed 4-read-from-replicas logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | starting test step 5-check-orchestrator logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | running command: [sh -c set -o errexit set -o xtrace source ../../functions orc_host=$(get_orc_headless_fqdn $(get_cluster_name) 0) cluster=$(run_curl "http://${orc_host}:3000/api/clusters/" | jq -r .[0] | sed "s/.${NAMESPACE}//g") args="--from-literal=cluster=${cluster}" run_curl "http://${orc_host}:3000/api/cluster/${cluster}/" | jq -r .[].Key.Hostname | sed "s/.${NAMESPACE}//g" >"${TEMP_DIR}/instances" args="${args} --from-file=instances=${TEMP_DIR}/instances" kubectl create configmap -n "${NAMESPACE}" 05-check-orchestrator ${args}] logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | + source ../../functions logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ realpath ../../.. logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | ++++ pwd logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/tests/init-deploy logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | ++ test_name=init-deploy logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/vars.sh logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/conf logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/conf logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ export TEMP_DIR=/tmp/kuttl/ps/init-deploy logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ 
TEMP_DIR=/tmp/kuttl/ps/init-deploy logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ export GIT_BRANCH=PR-930 logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ GIT_BRANCH=PR-930 logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ export VERSION=PR-930-3aa50acc logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ VERSION=PR-930-3aa50acc logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-930-3aa50acc logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-930-3aa50acc logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ export CERT_MANAGER_VER=1.17.2 logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ CERT_MANAGER_VER=1.17.2 logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ export MINIO_VER=5.4.0 logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ MINIO_VER=5.4.0 logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ 
CHAOS_MESH_VER=2.7.2 logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | ++++ which gdate logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-930/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | ++++ which date logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ date=/usr/bin/date logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ oc get projects logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ : logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ kubectl get nodes logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ grep '^minikube' logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | ++ oc get projects logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ get_cluster_name logger.go:42: 03:00:18 | init-deploy/5-check-orchestrator | +++ kubectl -n kuttl-test-welcome-marmoset get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 03:00:19 | init-deploy/5-check-orchestrator | ++ get_orc_headless_fqdn init-deploy 0 logger.go:42: 03:00:19 | init-deploy/5-check-orchestrator | ++ local cluster=init-deploy logger.go:42: 03:00:19 | init-deploy/5-check-orchestrator | ++ local index=0 logger.go:42: 03:00:19 | init-deploy/5-check-orchestrator | ++ echo init-deploy-orc-0.init-deploy-orc logger.go:42: 03:00:19 | init-deploy/5-check-orchestrator | + orc_host=init-deploy-orc-0.init-deploy-orc logger.go:42: 03:00:19 | init-deploy/5-check-orchestrator | ++ run_curl http://init-deploy-orc-0.init-deploy-orc:3000/api/clusters/ logger.go:42: 03:00:19 | init-deploy/5-check-orchestrator | ++ kubectl -n kuttl-test-welcome-marmoset exec mysql-client -- bash -c 'curl -s -k http://init-deploy-orc-0.init-deploy-orc:3000/api/clusters/' logger.go:42: 03:00:19 | init-deploy/5-check-orchestrator | ++ jq -r '.[0]' logger.go:42: 03:00:19 | init-deploy/5-check-orchestrator | ++ sed s/.kuttl-test-welcome-marmoset//g logger.go:42: 03:00:19 | init-deploy/5-check-orchestrator | + cluster=init-deploy-mysql-0.init-deploy-mysql:3306 logger.go:42: 03:00:19 | init-deploy/5-check-orchestrator | + args=--from-literal=cluster=init-deploy-mysql-0.init-deploy-mysql:3306 logger.go:42: 03:00:19 | init-deploy/5-check-orchestrator | + jq -r '.[].Key.Hostname' logger.go:42: 03:00:19 | init-deploy/5-check-orchestrator | + sed s/.kuttl-test-welcome-marmoset//g logger.go:42: 03:00:19 | init-deploy/5-check-orchestrator | + run_curl http://init-deploy-orc-0.init-deploy-orc:3000/api/cluster/init-deploy-mysql-0.init-deploy-mysql:3306/ logger.go:42: 03:00:19 | init-deploy/5-check-orchestrator | + kubectl -n kuttl-test-welcome-marmoset exec mysql-client -- bash -c 'curl -s -k http://init-deploy-orc-0.init-deploy-orc:3000/api/cluster/init-deploy-mysql-0.init-deploy-mysql:3306/' logger.go:42: 03:00:20 | init-deploy/5-check-orchestrator | + args='--from-literal=cluster=init-deploy-mysql-0.init-deploy-mysql:3306 --from-file=instances=/tmp/kuttl/ps/init-deploy/instances' logger.go:42: 03:00:20 | init-deploy/5-check-orchestrator | + kubectl create configmap -n kuttl-test-welcome-marmoset 05-check-orchestrator --from-literal=cluster=init-deploy-mysql-0.init-deploy-mysql:3306 --from-file=instances=/tmp/kuttl/ps/init-deploy/instances logger.go:42: 03:00:20 | init-deploy/5-check-orchestrator | configmap/05-check-orchestrator created logger.go:42: 03:00:21 | init-deploy/5-check-orchestrator | test step completed 5-check-orchestrator 
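[editor's note] For reference, the 5-check-orchestrator step that just completed runs the script quoted in its running command line; reformatted, it looks like this (run_curl execs curl inside the mysql-client pod, as the xtrace shows):

    orc_host=$(get_orc_headless_fqdn $(get_cluster_name) 0)
    # ask orchestrator which cluster it manages, stripping the namespace suffix
    cluster=$(run_curl "http://${orc_host}:3000/api/clusters/" | jq -r .[0] | sed "s/.${NAMESPACE}//g")
    args="--from-literal=cluster=${cluster}"
    # list the instances orchestrator sees for that cluster
    run_curl "http://${orc_host}:3000/api/cluster/${cluster}/" \
        | jq -r .[].Key.Hostname \
        | sed "s/.${NAMESPACE}//g" >"${TEMP_DIR}/instances"
    args="${args} --from-file=instances=${TEMP_DIR}/instances"
    kubectl create configmap -n "${NAMESPACE}" 05-check-orchestrator ${args}

In the run above the reported cluster is init-deploy-mysql-0.init-deploy-mysql:3306, and the instance list is written to /tmp/kuttl/ps/init-deploy/instances before being stored in the configmap.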
logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | starting test step 6-check-async-repl-not-ready-cr-status logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | running command: [sh -c set -o errexit set -o xtrace source ../../functions state=$(kubectl -n ${NAMESPACE} get ps $(get_cluster_name) -o jsonpath='{.status.state}') if [[ $state != "ready" ]]; then echo "Status state should be ready, but is $state." exit 1 fi run_mysqlsh "STOP REPLICA;" "-h localhost -P 33060 -uroot -proot_password" "init-deploy-mysql-2" sleep 20 state=$(kubectl -n ${NAMESPACE} get ps $(get_cluster_name) -o jsonpath='{.status.state}') if [[ $state != "initializing" ]]; then echo "Status state should be initializing, but is $state." exit 1 fi run_mysqlsh "START REPLICA;" "-h localhost -P 33060 -uroot -proot_password" "init-deploy-mysql-2" sleep 20 state=$(kubectl -n ${NAMESPACE} get ps $(get_cluster_name) -o jsonpath='{.status.state}') if [[ $state != "ready" ]]; then echo "Status state should be ready, but is $state." exit 1 fi] logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | + source ../../functions logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ realpath ../../.. logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | ++++ pwd logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/tests/init-deploy logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | ++ test_name=init-deploy logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/vars.sh logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/conf logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/conf logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ export TEMP_DIR=/tmp/kuttl/ps/init-deploy logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ TEMP_DIR=/tmp/kuttl/ps/init-deploy logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 03:00:21 | 
init-deploy/6-check-async-repl-not-ready-cr-status | +++ export GIT_BRANCH=PR-930 logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ GIT_BRANCH=PR-930 logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ export VERSION=PR-930-3aa50acc logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ VERSION=PR-930-3aa50acc logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-930-3aa50acc logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-930-3aa50acc logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ export CERT_MANAGER_VER=1.17.2 logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ CERT_MANAGER_VER=1.17.2 logger.go:42: 
03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ export MINIO_VER=5.4.0 logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ MINIO_VER=5.4.0 logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | ++++ which gdate logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-930/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | ++++ which date logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ date=/usr/bin/date logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ oc get projects logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ : logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ kubectl get nodes logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ grep '^minikube' logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | ++ oc get projects logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ get_cluster_name logger.go:42: 03:00:21 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ kubectl -n kuttl-test-welcome-marmoset get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 03:00:22 | init-deploy/6-check-async-repl-not-ready-cr-status | ++ kubectl -n kuttl-test-welcome-marmoset get ps init-deploy -o 'jsonpath={.status.state}' logger.go:42: 03:00:22 | init-deploy/6-check-async-repl-not-ready-cr-status | + state=ready logger.go:42: 03:00:22 | init-deploy/6-check-async-repl-not-ready-cr-status | + [[ ready != \r\e\a\d\y ]] logger.go:42: 03:00:22 | init-deploy/6-check-async-repl-not-ready-cr-status | + run_mysqlsh 'STOP REPLICA;' '-h localhost -P 33060 -uroot -proot_password' init-deploy-mysql-2 logger.go:42: 03:00:22 | init-deploy/6-check-async-repl-not-ready-cr-status | + local 'command=STOP REPLICA;' logger.go:42: 03:00:22 | init-deploy/6-check-async-repl-not-ready-cr-status | + local 'uri=-h localhost -P 33060 -uroot -proot_password' logger.go:42: 03:00:22 | init-deploy/6-check-async-repl-not-ready-cr-status | + local pod=init-deploy-mysql-2 logger.go:42: 03:00:22 | init-deploy/6-check-async-repl-not-ready-cr-status | ++ get_client_pod logger.go:42: 03:00:22 | init-deploy/6-check-async-repl-not-ready-cr-status | ++ kubectl -n kuttl-test-welcome-marmoset get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 03:00:23 | init-deploy/6-check-async-repl-not-ready-cr-status | + client_pod=mysql-client logger.go:42: 03:00:23 | init-deploy/6-check-async-repl-not-ready-cr-status | + wait_pod mysql-client logger.go:42: 03:00:23 | init-deploy/6-check-async-repl-not-ready-cr-status | + local pod=mysql-client logger.go:42: 03:00:23 | init-deploy/6-check-async-repl-not-ready-cr-status | + set +o xtrace logger.go:42: 03:00:23 | init-deploy/6-check-async-repl-not-ready-cr-status | mysql-clienttrue logger.go:42: 03:00:23 | init-deploy/6-check-async-repl-not-ready-cr-status | + kubectl -n kuttl-test-welcome-marmoset exec init-deploy-mysql-2 -- bash -c 'printf '\''%s\n'\'' "STOP REPLICA;" | mysqlsh --sql --quiet-start=2 -h 
localhost -P 33060 -uroot -proot_password' logger.go:42: 03:00:23 | init-deploy/6-check-async-repl-not-ready-cr-status | + tail -n +2 logger.go:42: 03:00:25 | init-deploy/6-check-async-repl-not-ready-cr-status | Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory logger.go:42: 03:00:25 | init-deploy/6-check-async-repl-not-ready-cr-status | + sleep 20 logger.go:42: 03:00:45 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ get_cluster_name logger.go:42: 03:00:45 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ kubectl -n kuttl-test-welcome-marmoset get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 03:00:45 | init-deploy/6-check-async-repl-not-ready-cr-status | ++ kubectl -n kuttl-test-welcome-marmoset get ps init-deploy -o 'jsonpath={.status.state}' logger.go:42: 03:00:46 | init-deploy/6-check-async-repl-not-ready-cr-status | + state=initializing logger.go:42: 03:00:46 | init-deploy/6-check-async-repl-not-ready-cr-status | + [[ initializing != \i\n\i\t\i\a\l\i\z\i\n\g ]] logger.go:42: 03:00:46 | init-deploy/6-check-async-repl-not-ready-cr-status | + run_mysqlsh 'START REPLICA;' '-h localhost -P 33060 -uroot -proot_password' init-deploy-mysql-2 logger.go:42: 03:00:46 | init-deploy/6-check-async-repl-not-ready-cr-status | + local 'command=START REPLICA;' logger.go:42: 03:00:46 | init-deploy/6-check-async-repl-not-ready-cr-status | + local 'uri=-h localhost -P 33060 -uroot -proot_password' logger.go:42: 03:00:46 | init-deploy/6-check-async-repl-not-ready-cr-status | + local pod=init-deploy-mysql-2 logger.go:42: 03:00:46 | init-deploy/6-check-async-repl-not-ready-cr-status | ++ get_client_pod logger.go:42: 03:00:46 | init-deploy/6-check-async-repl-not-ready-cr-status | ++ kubectl -n kuttl-test-welcome-marmoset get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 03:00:46 | init-deploy/6-check-async-repl-not-ready-cr-status | + client_pod=mysql-client logger.go:42: 03:00:46 | init-deploy/6-check-async-repl-not-ready-cr-status | + wait_pod mysql-client logger.go:42: 03:00:46 | init-deploy/6-check-async-repl-not-ready-cr-status | + local pod=mysql-client logger.go:42: 03:00:46 | init-deploy/6-check-async-repl-not-ready-cr-status | + set +o xtrace logger.go:42: 03:00:47 | init-deploy/6-check-async-repl-not-ready-cr-status | mysql-clienttrue logger.go:42: 03:00:47 | init-deploy/6-check-async-repl-not-ready-cr-status | + tail -n +2 logger.go:42: 03:00:47 | init-deploy/6-check-async-repl-not-ready-cr-status | + kubectl -n kuttl-test-welcome-marmoset exec init-deploy-mysql-2 -- bash -c 'printf '\''%s\n'\'' "START REPLICA;" | mysqlsh --sql --quiet-start=2 -h localhost -P 33060 -uroot -proot_password' logger.go:42: 03:00:48 | init-deploy/6-check-async-repl-not-ready-cr-status | Cannot set LC_ALL to locale en_US.UTF-8: No such file or directory logger.go:42: 03:00:48 | init-deploy/6-check-async-repl-not-ready-cr-status | + sleep 20 logger.go:42: 03:01:08 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ get_cluster_name logger.go:42: 03:01:08 | init-deploy/6-check-async-repl-not-ready-cr-status | +++ kubectl -n kuttl-test-welcome-marmoset get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 03:01:09 | init-deploy/6-check-async-repl-not-ready-cr-status | ++ kubectl -n kuttl-test-welcome-marmoset get ps init-deploy -o 'jsonpath={.status.state}' logger.go:42: 03:01:09 | init-deploy/6-check-async-repl-not-ready-cr-status | + state=ready logger.go:42: 03:01:09 | init-deploy/6-check-async-repl-not-ready-cr-status 
| + [[ ready != \r\e\a\d\y ]] logger.go:42: 03:01:09 | init-deploy/6-check-async-repl-not-ready-cr-status | test step completed 6-check-async-repl-not-ready-cr-status logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | starting test step 7-check-password-leak logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | running command: [sh -c set -o errexit set -o xtrace source ../../functions check_passwords_leak] logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | + source ../../functions logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ realpath ../../.. logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | ++++ pwd logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/tests/init-deploy logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | ++ test_name=init-deploy logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/vars.sh logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/conf logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/conf logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ export TEMP_DIR=/tmp/kuttl/ps/init-deploy logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ TEMP_DIR=/tmp/kuttl/ps/init-deploy logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ export GIT_BRANCH=PR-930 logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ GIT_BRANCH=PR-930 logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ export VERSION=PR-930-3aa50acc logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ VERSION=PR-930-3aa50acc logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-930-3aa50acc logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-930-3aa50acc logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ export 
IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ export CERT_MANAGER_VER=1.17.2 logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ CERT_MANAGER_VER=1.17.2 logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ export MINIO_VER=5.4.0 logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ MINIO_VER=5.4.0 logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | ++++ which gdate logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-930/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | ++++ which date logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ date=/usr/bin/date logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ oc get projects logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ : logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ kubectl get nodes logger.go:42: 03:01:09 | init-deploy/7-check-password-leak | +++ grep '^minikube' logger.go:42: 03:01:10 | init-deploy/7-check-password-leak | ++ oc get projects logger.go:42: 03:01:10 | init-deploy/7-check-password-leak | + check_passwords_leak logger.go:42: 03:01:10 | init-deploy/7-check-password-leak | + local secrets logger.go:42: 03:01:10 | init-deploy/7-check-password-leak | + local 
passwords logger.go:42: 03:01:10 | init-deploy/7-check-password-leak | + local pods logger.go:42: 03:01:10 | init-deploy/7-check-password-leak | ++ kubectl get secrets -o json logger.go:42: 03:01:10 | init-deploy/7-check-password-leak | ++ jq -r '.items[].data | to_entries | .[] | select(.key | (endswith(".crt") or endswith(".key") or endswith(".pub") or endswith(".pem") or endswith(".p12") or test("namespace")) | not) | .value' logger.go:42: 03:01:10 | init-deploy/7-check-password-leak | + secrets= logger.go:42: 03:01:10 | init-deploy/7-check-password-leak | + passwords=' ' logger.go:42: 03:01:10 | init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-welcome-marmoset get pods -o name logger.go:42: 03:01:10 | init-deploy/7-check-password-leak | ++ awk -F / '{print $2}' logger.go:42: 03:01:11 | init-deploy/7-check-password-leak | + pods='init-deploy-haproxy-0 logger.go:42: 03:01:11 | init-deploy/7-check-password-leak | init-deploy-haproxy-1 logger.go:42: 03:01:11 | init-deploy/7-check-password-leak | init-deploy-haproxy-2 logger.go:42: 03:01:11 | init-deploy/7-check-password-leak | init-deploy-mysql-0 logger.go:42: 03:01:11 | init-deploy/7-check-password-leak | init-deploy-mysql-1 logger.go:42: 03:01:11 | init-deploy/7-check-password-leak | init-deploy-mysql-2 logger.go:42: 03:01:11 | init-deploy/7-check-password-leak | init-deploy-orc-0 logger.go:42: 03:01:11 | init-deploy/7-check-password-leak | init-deploy-orc-1 logger.go:42: 03:01:11 | init-deploy/7-check-password-leak | init-deploy-orc-2 logger.go:42: 03:01:11 | init-deploy/7-check-password-leak | mysql-client' logger.go:42: 03:01:11 | init-deploy/7-check-password-leak | + collect_logs kuttl-test-welcome-marmoset logger.go:42: 03:01:11 | init-deploy/7-check-password-leak | + local containers logger.go:42: 03:01:11 | init-deploy/7-check-password-leak | + local count logger.go:42: 03:01:11 | init-deploy/7-check-password-leak | + NS=kuttl-test-welcome-marmoset logger.go:42: 03:01:11 | init-deploy/7-check-password-leak | + for p in '$pods' logger.go:42: 03:01:11 | init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-welcome-marmoset get pod init-deploy-haproxy-0 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:11 | init-deploy/7-check-password-leak | + containers='haproxy mysql-monit' logger.go:42: 03:01:11 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:11 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs init-deploy-haproxy-0 -c haproxy logger.go:42: 03:01:13 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-0-haproxy.txt logger.go:42: 03:01:13 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-0-haproxy.txt logger.go:42: 03:01:13 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:13 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs init-deploy-haproxy-0 -c mysql-monit logger.go:42: 03:01:13 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-0-mysql-monit.txt logger.go:42: 03:01:13 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-0-mysql-monit.txt logger.go:42: 03:01:13 | init-deploy/7-check-password-leak | + echo logger.go:42: 03:01:13 | init-deploy/7-check-password-leak | logger.go:42: 03:01:13 | 
init-deploy/7-check-password-leak | + for p in '$pods' logger.go:42: 03:01:13 | init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-welcome-marmoset get pod init-deploy-haproxy-1 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:14 | init-deploy/7-check-password-leak | + containers='haproxy mysql-monit' logger.go:42: 03:01:14 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:14 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs init-deploy-haproxy-1 -c haproxy logger.go:42: 03:01:14 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-1-haproxy.txt logger.go:42: 03:01:14 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-1-haproxy.txt logger.go:42: 03:01:14 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:14 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs init-deploy-haproxy-1 -c mysql-monit logger.go:42: 03:01:15 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-1-mysql-monit.txt logger.go:42: 03:01:15 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-1-mysql-monit.txt logger.go:42: 03:01:15 | init-deploy/7-check-password-leak | + echo logger.go:42: 03:01:15 | init-deploy/7-check-password-leak | logger.go:42: 03:01:15 | init-deploy/7-check-password-leak | + for p in '$pods' logger.go:42: 03:01:15 | init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-welcome-marmoset get pod init-deploy-haproxy-2 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:15 | init-deploy/7-check-password-leak | + containers='haproxy mysql-monit' logger.go:42: 03:01:15 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:15 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs init-deploy-haproxy-2 -c haproxy logger.go:42: 03:01:16 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-2-haproxy.txt logger.go:42: 03:01:16 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-2-haproxy.txt logger.go:42: 03:01:16 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:16 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs init-deploy-haproxy-2 -c mysql-monit logger.go:42: 03:01:17 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-2-mysql-monit.txt logger.go:42: 03:01:17 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-haproxy-2-mysql-monit.txt logger.go:42: 03:01:17 | init-deploy/7-check-password-leak | + echo logger.go:42: 03:01:17 | init-deploy/7-check-password-leak | logger.go:42: 03:01:17 | init-deploy/7-check-password-leak | + for p in '$pods' logger.go:42: 03:01:17 | init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-welcome-marmoset get pod init-deploy-mysql-0 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:17 | init-deploy/7-check-password-leak | + containers='mysql xtrabackup pt-heartbeat' logger.go:42: 03:01:17 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:17 | 
init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs init-deploy-mysql-0 -c mysql logger.go:42: 03:01:18 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-0-mysql.txt logger.go:42: 03:01:18 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-0-mysql.txt logger.go:42: 03:01:18 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:18 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs init-deploy-mysql-0 -c xtrabackup logger.go:42: 03:01:19 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-0-xtrabackup.txt logger.go:42: 03:01:19 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-0-xtrabackup.txt logger.go:42: 03:01:19 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:19 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs init-deploy-mysql-0 -c pt-heartbeat logger.go:42: 03:01:19 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-0-pt-heartbeat.txt logger.go:42: 03:01:19 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-0-pt-heartbeat.txt logger.go:42: 03:01:19 | init-deploy/7-check-password-leak | + echo logger.go:42: 03:01:19 | init-deploy/7-check-password-leak | logger.go:42: 03:01:19 | init-deploy/7-check-password-leak | + for p in '$pods' logger.go:42: 03:01:19 | init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-welcome-marmoset get pod init-deploy-mysql-1 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:20 | init-deploy/7-check-password-leak | + containers='mysql xtrabackup pt-heartbeat' logger.go:42: 03:01:20 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:20 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs init-deploy-mysql-1 -c mysql logger.go:42: 03:01:20 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-1-mysql.txt logger.go:42: 03:01:20 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-1-mysql.txt logger.go:42: 03:01:20 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:20 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs init-deploy-mysql-1 -c xtrabackup logger.go:42: 03:01:21 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-1-xtrabackup.txt logger.go:42: 03:01:21 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-1-xtrabackup.txt logger.go:42: 03:01:21 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:21 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs init-deploy-mysql-1 -c pt-heartbeat logger.go:42: 03:01:22 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-1-pt-heartbeat.txt logger.go:42: 03:01:22 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-1-pt-heartbeat.txt 
logger.go:42: 03:01:22 | init-deploy/7-check-password-leak | + echo logger.go:42: 03:01:22 | init-deploy/7-check-password-leak | logger.go:42: 03:01:22 | init-deploy/7-check-password-leak | + for p in '$pods' logger.go:42: 03:01:22 | init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-welcome-marmoset get pod init-deploy-mysql-2 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:22 | init-deploy/7-check-password-leak | + containers='mysql xtrabackup pt-heartbeat' logger.go:42: 03:01:22 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:22 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs init-deploy-mysql-2 -c mysql logger.go:42: 03:01:23 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-2-mysql.txt logger.go:42: 03:01:23 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-2-mysql.txt logger.go:42: 03:01:23 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:23 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs init-deploy-mysql-2 -c xtrabackup logger.go:42: 03:01:23 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-2-xtrabackup.txt logger.go:42: 03:01:23 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-2-xtrabackup.txt logger.go:42: 03:01:23 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:23 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs init-deploy-mysql-2 -c pt-heartbeat logger.go:42: 03:01:24 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-2-pt-heartbeat.txt logger.go:42: 03:01:24 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-mysql-2-pt-heartbeat.txt logger.go:42: 03:01:24 | init-deploy/7-check-password-leak | + echo logger.go:42: 03:01:24 | init-deploy/7-check-password-leak | logger.go:42: 03:01:24 | init-deploy/7-check-password-leak | + for p in '$pods' logger.go:42: 03:01:24 | init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-welcome-marmoset get pod init-deploy-orc-0 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:25 | init-deploy/7-check-password-leak | + containers='orc mysql-monit' logger.go:42: 03:01:25 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:25 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs init-deploy-orc-0 -c orc logger.go:42: 03:01:26 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-0-orc.txt logger.go:42: 03:01:26 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-0-orc.txt logger.go:42: 03:01:26 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:26 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs init-deploy-orc-0 -c mysql-monit logger.go:42: 03:01:27 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-0-mysql-monit.txt logger.go:42: 03:01:27 | init-deploy/7-check-password-leak | logs saved in: 
/tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-0-mysql-monit.txt logger.go:42: 03:01:27 | init-deploy/7-check-password-leak | + echo logger.go:42: 03:01:27 | init-deploy/7-check-password-leak | logger.go:42: 03:01:27 | init-deploy/7-check-password-leak | + for p in '$pods' logger.go:42: 03:01:27 | init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-welcome-marmoset get pod init-deploy-orc-1 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:27 | init-deploy/7-check-password-leak | + containers='orc mysql-monit' logger.go:42: 03:01:27 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:27 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs init-deploy-orc-1 -c orc logger.go:42: 03:01:28 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-1-orc.txt logger.go:42: 03:01:28 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-1-orc.txt logger.go:42: 03:01:28 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:28 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs init-deploy-orc-1 -c mysql-monit logger.go:42: 03:01:29 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-1-mysql-monit.txt logger.go:42: 03:01:29 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-1-mysql-monit.txt logger.go:42: 03:01:29 | init-deploy/7-check-password-leak | + echo logger.go:42: 03:01:29 | init-deploy/7-check-password-leak | logger.go:42: 03:01:29 | init-deploy/7-check-password-leak | + for p in '$pods' logger.go:42: 03:01:29 | init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-welcome-marmoset get pod init-deploy-orc-2 -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:29 | init-deploy/7-check-password-leak | + containers='orc mysql-monit' logger.go:42: 03:01:29 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:29 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs init-deploy-orc-2 -c orc logger.go:42: 03:01:30 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-2-orc.txt logger.go:42: 03:01:30 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-2-orc.txt logger.go:42: 03:01:30 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:30 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs init-deploy-orc-2 -c mysql-monit logger.go:42: 03:01:31 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-2-mysql-monit.txt logger.go:42: 03:01:31 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-init-deploy-orc-2-mysql-monit.txt logger.go:42: 03:01:31 | init-deploy/7-check-password-leak | + echo logger.go:42: 03:01:31 | init-deploy/7-check-password-leak | logger.go:42: 03:01:31 | init-deploy/7-check-password-leak | + for p in '$pods' logger.go:42: 03:01:31 | init-deploy/7-check-password-leak | ++ kubectl -n kuttl-test-welcome-marmoset get pod mysql-client -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:31 | init-deploy/7-check-password-leak | + containers=mysql-client 
logger.go:42: 03:01:31 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:31 | init-deploy/7-check-password-leak | + kubectl -n kuttl-test-welcome-marmoset logs mysql-client -c mysql-client logger.go:42: 03:01:32 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-mysql-client-mysql-client.txt logger.go:42: 03:01:32 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-mysql-client-mysql-client.txt logger.go:42: 03:01:32 | init-deploy/7-check-password-leak | + echo logger.go:42: 03:01:32 | init-deploy/7-check-password-leak | logger.go:42: 03:01:32 | init-deploy/7-check-password-leak | + '[' -n ps-operator ']' logger.go:42: 03:01:32 | init-deploy/7-check-password-leak | ++ kubectl -n ps-operator get pods -o name logger.go:42: 03:01:32 | init-deploy/7-check-password-leak | ++ awk -F / '{print $2}' logger.go:42: 03:01:32 | init-deploy/7-check-password-leak | + pods=percona-server-mysql-operator-5f69ffd4c5-vxcwt logger.go:42: 03:01:32 | init-deploy/7-check-password-leak | + collect_logs ps-operator logger.go:42: 03:01:32 | init-deploy/7-check-password-leak | + local containers logger.go:42: 03:01:32 | init-deploy/7-check-password-leak | + local count logger.go:42: 03:01:32 | init-deploy/7-check-password-leak | + NS=ps-operator logger.go:42: 03:01:32 | init-deploy/7-check-password-leak | + for p in '$pods' logger.go:42: 03:01:32 | init-deploy/7-check-password-leak | ++ kubectl -n ps-operator get pod percona-server-mysql-operator-5f69ffd4c5-vxcwt -o 'jsonpath={.spec.containers[*].name}' logger.go:42: 03:01:32 | init-deploy/7-check-password-leak | + containers=manager logger.go:42: 03:01:32 | init-deploy/7-check-password-leak | + for c in '$containers' logger.go:42: 03:01:32 | init-deploy/7-check-password-leak | + kubectl -n ps-operator logs percona-server-mysql-operator-5f69ffd4c5-vxcwt -c manager logger.go:42: 03:01:33 | init-deploy/7-check-password-leak | + echo logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-percona-server-mysql-operator-5f69ffd4c5-vxcwt-manager.txt logger.go:42: 03:01:33 | init-deploy/7-check-password-leak | logs saved in: /tmp/kuttl/ps/init-deploy/logs_output-percona-server-mysql-operator-5f69ffd4c5-vxcwt-manager.txt logger.go:42: 03:01:33 | init-deploy/7-check-password-leak | + echo logger.go:42: 03:01:33 | init-deploy/7-check-password-leak | logger.go:42: 03:01:33 | init-deploy/7-check-password-leak | test step completed 7-check-password-leak logger.go:42: 03:01:33 | init-deploy/98-drop-finalizer | starting test step 98-drop-finalizer logger.go:42: 03:01:34 | init-deploy/98-drop-finalizer | PerconaServerMySQL:kuttl-test-welcome-marmoset/init-deploy updated logger.go:42: 03:01:34 | init-deploy/98-drop-finalizer | test step completed 98-drop-finalizer logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | starting test step 99-remove-cluster-gracefully logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_operator] logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | + source ../../functions logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ realpath ../../.. 
logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | ++++ pwd logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/tests/init-deploy logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | ++ test_name=init-deploy logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/vars.sh logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-930 logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/deploy logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/conf logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-930/e2e-tests/conf logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ export TEMP_DIR=/tmp/kuttl/ps/init-deploy logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ TEMP_DIR=/tmp/kuttl/ps/init-deploy logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ export GIT_BRANCH=PR-930 logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ GIT_BRANCH=PR-930 logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ export VERSION=PR-930-3aa50acc logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ VERSION=PR-930-3aa50acc logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-930-3aa50acc logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-930-3aa50acc logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ 
IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ export CERT_MANAGER_VER=1.17.2 logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ CERT_MANAGER_VER=1.17.2 logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ export MINIO_VER=5.4.0 logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ MINIO_VER=5.4.0 logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | ++++ which gdate logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-930/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | ++++ which date logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ date=/usr/bin/date logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ oc get projects logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ : logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ kubectl get nodes logger.go:42: 03:01:34 | init-deploy/99-remove-cluster-gracefully | +++ grep '^minikube' logger.go:42: 03:01:35 | init-deploy/99-remove-cluster-gracefully | ++ oc get projects logger.go:42: 03:01:35 | init-deploy/99-remove-cluster-gracefully | + destroy_operator logger.go:42: 03:01:35 | init-deploy/99-remove-cluster-gracefully | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0 logger.go:42: 03:01:35 | init-deploy/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. 
The resource may continue to run on the cluster indefinitely. logger.go:42: 03:01:35 | init-deploy/99-remove-cluster-gracefully | deployment.apps "percona-server-mysql-operator" force deleted logger.go:42: 03:01:35 | init-deploy/99-remove-cluster-gracefully | + [[ -n ps-operator ]] logger.go:42: 03:01:35 | init-deploy/99-remove-cluster-gracefully | + kubectl delete namespace ps-operator --force --grace-period=0 logger.go:42: 03:01:35 | init-deploy/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely. logger.go:42: 03:01:36 | init-deploy/99-remove-cluster-gracefully | namespace "ps-operator" force deleted logger.go:42: 03:01:44 | init-deploy/99-remove-cluster-gracefully | test step completed 99-remove-cluster-gracefully logger.go:42: 03:01:44 | init-deploy | init-deploy events from ns kuttl-test-welcome-marmoset: logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:16 +0000 UTC Normal Pod mysql-client Binding Scheduled Successfully assigned kuttl-test-welcome-marmoset/mysql-client to gke-jen-ps-930-3aa50acc--default-pool-fb9b3422-pvcp default-scheduler logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:16 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulling Pulling image "percona/percona-server:8.0.33" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:28 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:28 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-welcome-marmoset/datadir-init-deploy-mysql-0" pd.csi.storage.gke.io_gke-af40a9c872974bf2b3a6-be26-dde6-vm_a48c7189-e7f7-4cc3-a564-4b7a6a227ed2 logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:28 +0000 UTC Normal StatefulSet.apps init-deploy-mysql SuccessfulCreate create Claim datadir-init-deploy-mysql-0 Pod init-deploy-mysql-0 in StatefulSet init-deploy-mysql success statefulset-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:28 +0000 UTC Normal StatefulSet.apps init-deploy-mysql SuccessfulCreate create Pod init-deploy-mysql-0 in StatefulSet init-deploy-mysql successful statefulset-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:29 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. 
persistentvolume-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:29 +0000 UTC Normal Pod init-deploy-orc-0 Binding Scheduled Successfully assigned kuttl-test-welcome-marmoset/init-deploy-orc-0 to gke-jen-ps-930-3aa50acc--default-pool-fb9b3422-518z default-scheduler logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:29 +0000 UTC Normal StatefulSet.apps init-deploy-orc SuccessfulCreate create Pod init-deploy-orc-0 in StatefulSet init-deploy-orc successful statefulset-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:30 +0000 UTC Normal Pod init-deploy-orc-0.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-930-3aa50acc" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:30 +0000 UTC Normal Pod init-deploy-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-930-3aa50acc" in 140ms (140ms including waiting). Image size: 108787436 bytes. kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:30 +0000 UTC Normal Pod init-deploy-orc-0.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:30 +0000 UTC Normal Pod init-deploy-orc-0.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:32 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-328d1bca-0bbd-475b-8a9d-f9cbbba7cc81 pd.csi.storage.gke.io_gke-af40a9c872974bf2b3a6-be26-dde6-vm_a48c7189-e7f7-4cc3-a564-4b7a6a227ed2 logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:32 +0000 UTC Normal Pod init-deploy-mysql-0 Binding Scheduled Successfully assigned kuttl-test-welcome-marmoset/init-deploy-mysql-0 to gke-jen-ps-930-3aa50acc--default-pool-fb9b3422-pvcp default-scheduler logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:33 +0000 UTC Normal Pod init-deploy-orc-0.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:33 +0000 UTC Normal Pod init-deploy-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 118ms (118ms including waiting). Image size: 72477983 bytes. kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:33 +0000 UTC Normal Pod init-deploy-orc-0.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:33 +0000 UTC Normal Pod init-deploy-orc-0.spec.containers{orc} Started Started container orc kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:33 +0000 UTC Normal Pod init-deploy-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:33 +0000 UTC Normal Pod init-deploy-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 102ms (102ms including waiting). Image size: 72477983 bytes. 
kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:33 +0000 UTC Normal Pod init-deploy-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:33 +0000 UTC Normal Pod init-deploy-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:36 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Successfully pulled image "percona/percona-server:8.0.33" in 19.565s (19.565s including waiting). Image size: 387098058 bytes. kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:36 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container: mysql-client kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:36 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:40 +0000 UTC Normal Pod init-deploy-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-328d1bca-0bbd-475b-8a9d-f9cbbba7cc81" attachdetach-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:42 +0000 UTC Normal Pod init-deploy-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-930-3aa50acc" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:42 +0000 UTC Normal Pod init-deploy-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-930-3aa50acc" in 783ms (783ms including waiting). Image size: 108787436 bytes. kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:43 +0000 UTC Normal Pod init-deploy-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:43 +0000 UTC Normal Pod init-deploy-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:46 +0000 UTC Normal Pod init-deploy-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 99ms (99ms including waiting). Image size: 72477983 bytes. kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:51 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:52 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 240ms (240ms including waiting). Image size: 436542924 bytes. 
kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:52 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:52 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:52 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:52 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 266ms (266ms including waiting). Image size: 445635399 bytes. kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:52 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:52 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:52 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:56 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 3.962s (3.962s including waiting). Image size: 132952022 bytes. kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:56 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:56:56 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:20 +0000 UTC Normal Pod init-deploy-orc-1 Binding Scheduled Successfully assigned kuttl-test-welcome-marmoset/init-deploy-orc-1 to gke-jen-ps-930-3aa50acc--default-pool-fb9b3422-c7rw default-scheduler logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:20 +0000 UTC Normal Pod init-deploy-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-930-3aa50acc" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:20 +0000 UTC Normal Pod init-deploy-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-930-3aa50acc" in 288ms (288ms including waiting). Image size: 108787436 bytes. 
kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:20 +0000 UTC Normal Pod init-deploy-orc-1.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:20 +0000 UTC Normal StatefulSet.apps init-deploy-orc SuccessfulCreate create Pod init-deploy-orc-1 in StatefulSet init-deploy-orc successful statefulset-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:21 +0000 UTC Normal Pod init-deploy-orc-1.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:23 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:23 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 242ms (242ms including waiting). Image size: 72477983 bytes. kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:23 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:23 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{orc} Started Started container orc kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:23 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:24 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 232ms (232ms including waiting). Image size: 72477983 bytes. 
kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:24 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:24 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:26 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:26 +0000 UTC Normal StatefulSet.apps init-deploy-mysql SuccessfulCreate create Claim datadir-init-deploy-mysql-1 Pod init-deploy-mysql-1 in StatefulSet init-deploy-mysql success statefulset-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:26 +0000 UTC Normal StatefulSet.apps init-deploy-mysql SuccessfulCreate create Pod init-deploy-mysql-1 in StatefulSet init-deploy-mysql successful statefulset-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:27 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-welcome-marmoset/datadir-init-deploy-mysql-1" pd.csi.storage.gke.io_gke-af40a9c872974bf2b3a6-be26-dde6-vm_a48c7189-e7f7-4cc3-a564-4b7a6a227ed2 logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:27 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:30 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-c691367f-2c7b-4a8c-9b00-ed6e76b2d1c8 pd.csi.storage.gke.io_gke-af40a9c872974bf2b3a6-be26-dde6-vm_a48c7189-e7f7-4cc3-a564-4b7a6a227ed2 logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:30 +0000 UTC Normal Pod init-deploy-mysql-1 Binding Scheduled Successfully assigned kuttl-test-welcome-marmoset/init-deploy-mysql-1 to gke-jen-ps-930-3aa50acc--default-pool-fb9b3422-518z default-scheduler logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:31 +0000 UTC Normal Pod init-deploy-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-welcome-marmoset/init-deploy-haproxy-0 to gke-jen-ps-930-3aa50acc--default-pool-fb9b3422-pvcp default-scheduler logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:31 +0000 UTC Normal StatefulSet.apps init-deploy-haproxy SuccessfulCreate create Pod init-deploy-haproxy-0 in StatefulSet init-deploy-haproxy successful statefulset-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:32 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-930-3aa50acc" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:32 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-930-3aa50acc" in 218ms (218ms including waiting). Image size: 108787436 bytes. 
kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:32 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:32 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:35 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:38 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 3.732s (3.732s including waiting). Image size: 102736204 bytes. kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:38 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:38 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:38 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:38 +0000 UTC Normal Pod init-deploy-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-c691367f-2c7b-4a8c-9b00-ed6e76b2d1c8" attachdetach-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:39 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 223ms (223ms including waiting). Image size: 102736204 bytes. kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:39 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:39 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:39 +0000 UTC Normal Pod init-deploy-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-930-3aa50acc" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:40 +0000 UTC Normal Pod init-deploy-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-welcome-marmoset/init-deploy-haproxy-1 to gke-jen-ps-930-3aa50acc--default-pool-fb9b3422-c7rw default-scheduler logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:40 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-930-3aa50acc" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:40 +0000 UTC Normal StatefulSet.apps init-deploy-haproxy SuccessfulCreate create Pod init-deploy-haproxy-1 in StatefulSet init-deploy-haproxy successful statefulset-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:40 +0000 UTC Normal Pod init-deploy-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-930-3aa50acc" in 257ms (257ms including waiting). Image size: 108787436 bytes. 
kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:40 +0000 UTC Normal Pod init-deploy-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:40 +0000 UTC Normal Pod init-deploy-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:41 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-930-3aa50acc" in 275ms (275ms including waiting). Image size: 108787436 bytes. kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:41 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:41 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:42 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:42 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 187ms (187ms including waiting). Image size: 436542924 bytes. kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:42 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:42 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:42 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:42 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 173ms (173ms including waiting). Image size: 445635399 bytes. kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:42 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:42 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:43 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:43 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 112ms (112ms including waiting). Image size: 102736204 bytes. 
kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:43 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:43 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:43 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:43 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:43 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 103ms (103ms including waiting). Image size: 132952022 bytes. kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:43 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:43 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:44 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 234ms (234ms including waiting). Image size: 102736204 bytes. kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:44 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:44 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:44 +0000 UTC Normal Pod init-deploy-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-welcome-marmoset/init-deploy-haproxy-2 to gke-jen-ps-930-3aa50acc--default-pool-fb9b3422-518z default-scheduler logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:44 +0000 UTC Normal StatefulSet.apps init-deploy-haproxy SuccessfulCreate create Pod init-deploy-haproxy-2 in StatefulSet init-deploy-haproxy successful statefulset-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:45 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-930-3aa50acc" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:45 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-930-3aa50acc" in 154ms (154ms including waiting). Image size: 108787436 bytes. 
kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:45 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:45 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:47 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:47 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 101ms (101ms including waiting). Image size: 102736204 bytes. kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:47 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:47 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:47 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:47 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 201ms (201ms including waiting). Image size: 102736204 bytes. kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:47 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:47 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:55 +0000 UTC Normal Pod init-deploy-orc-2 Binding Scheduled Successfully assigned kuttl-test-welcome-marmoset/init-deploy-orc-2 to gke-jen-ps-930-3aa50acc--default-pool-fb9b3422-pvcp default-scheduler logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:55 +0000 UTC Normal StatefulSet.apps init-deploy-orc SuccessfulCreate create Pod init-deploy-orc-2 in StatefulSet init-deploy-orc successful statefulset-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:56 +0000 UTC Normal Pod init-deploy-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-930-3aa50acc" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:56 +0000 UTC Normal Pod init-deploy-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-930-3aa50acc" in 269ms (269ms including waiting). Image size: 108787436 bytes. 
kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:56 +0000 UTC Normal Pod init-deploy-orc-2.spec.initContainers{orc-init} Created Created container: orc-init kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:57:56 +0000 UTC Normal Pod init-deploy-orc-2.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:01 +0000 UTC Normal Pod init-deploy-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:02 +0000 UTC Warning Pod init-deploy-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/06/04 02:57:59 Waiting for MySQL ready state 2025/06/04 02:58:00 MySQL is ready 2025/06/04 02:58:00 Peers: [6135323936623536.init-deploy-mysql-unready.kuttl-test-welcome-marmoset 6638383832383366.init-deploy-mysql-unready.kuttl-test-welcome-marmoset] 2025/06/04 02:58:00 FQDN: init-deploy-mysql-1.init-deploy-mysql.kuttl-test-welcome-marmoset 2025/06/04 02:58:00 Primary: init-deploy-mysql-0.init-deploy-mysql.kuttl-test-welcome-marmoset Replicas: [init-deploy-mysql-1.init-deploy-mysql.kuttl-test-welcome-marmoset] 2025/06/04 02:58:00 lookup init-deploy-mysql-1 [10.139.9.23] 2025/06/04 02:58:00 PodIP: 10.139.9.23 2025/06/04 02:58:00 lookup init-deploy-mysql-0.init-deploy-mysql.kuttl-test-welcome-marmoset [10.139.10.11] 2025/06/04 02:58:00 PrimaryIP: 10.139.10.11 2025/06/04 02:58:00 Donor: init-deploy-mysql-0.init-deploy-mysql.kuttl-test-welcome-marmoset 2025/06/04 02:58:00 Opening connection to 10.139.9.23 2025/06/04 02:58:00 Clone required: true 2025/06/04 02:58:00 Checking if a clone in progress 2025/06/04 02:58:00 Clone in progress: false 2025/06/04 02:58:00 Cloning from init-deploy-mysql-0.init-deploy-mysql.kuttl-test-welcome-marmoset 2025/06/04 02:58:02 Clone finished. Restarting container... kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:02 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:03 +0000 UTC Normal Pod init-deploy-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 2.175s (2.175s including waiting). Image size: 72477983 bytes. kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:03 +0000 UTC Normal Pod init-deploy-orc-2.spec.containers{orc} Created Created container: orc kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:03 +0000 UTC Normal Pod init-deploy-orc-2.spec.containers{orc} Started Started container orc kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:03 +0000 UTC Normal Pod init-deploy-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:03 +0000 UTC Normal Pod init-deploy-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 229ms (229ms including waiting). Image size: 72477983 bytes. 
kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:03 +0000 UTC Normal Pod init-deploy-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:04 +0000 UTC Normal Pod init-deploy-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:05 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 136ms (136ms including waiting). Image size: 436542924 bytes. kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:39 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:39 +0000 UTC Normal StatefulSet.apps init-deploy-mysql SuccessfulCreate create Claim datadir-init-deploy-mysql-2 Pod init-deploy-mysql-2 in StatefulSet init-deploy-mysql success statefulset-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:40 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:40 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-welcome-marmoset/datadir-init-deploy-mysql-2" pd.csi.storage.gke.io_gke-af40a9c872974bf2b3a6-be26-dde6-vm_a48c7189-e7f7-4cc3-a564-4b7a6a227ed2 logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:40 +0000 UTC Normal StatefulSet.apps init-deploy-mysql SuccessfulCreate create Pod init-deploy-mysql-2 in StatefulSet init-deploy-mysql successful statefulset-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:43 +0000 UTC Normal PersistentVolumeClaim datadir-init-deploy-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-0e80bdcd-137f-4236-ab91-8948a98a36ad pd.csi.storage.gke.io_gke-af40a9c872974bf2b3a6-be26-dde6-vm_a48c7189-e7f7-4cc3-a564-4b7a6a227ed2 logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:44 +0000 UTC Normal Pod init-deploy-mysql-2 Binding Scheduled Successfully assigned kuttl-test-welcome-marmoset/init-deploy-mysql-2 to gke-jen-ps-930-3aa50acc--default-pool-fb9b3422-c7rw default-scheduler logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:51 +0000 UTC Normal Pod init-deploy-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-0e80bdcd-137f-4236-ab91-8948a98a36ad" attachdetach-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:54 +0000 UTC Normal Pod init-deploy-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-930-3aa50acc" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:54 +0000 UTC Normal Pod init-deploy-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-930-3aa50acc" in 151ms (151ms including waiting). Image size: 108787436 bytes. 
kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:55 +0000 UTC Normal Pod init-deploy-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:55 +0000 UTC Normal Pod init-deploy-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:56 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:56 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 139ms (139ms including waiting). Image size: 436542924 bytes. kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:57 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:57 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:57 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:57 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 139ms (139ms including waiting). Image size: 445635399 bytes. kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:57 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:57 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:57 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:57 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 132ms (132ms including waiting). Image size: 132952022 bytes. 
kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:57 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:58:57 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:59:16 +0000 UTC Warning Pod init-deploy-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/06/04 02:59:14 Waiting for MySQL ready state 2025/06/04 02:59:15 MySQL is ready 2025/06/04 02:59:15 Peers: [6135323936623536.init-deploy-mysql-unready.kuttl-test-welcome-marmoset 6330336362646463.init-deploy-mysql-unready.kuttl-test-welcome-marmoset 6638383832383366.init-deploy-mysql-unready.kuttl-test-welcome-marmoset] 2025/06/04 02:59:15 FQDN: init-deploy-mysql-2.init-deploy-mysql.kuttl-test-welcome-marmoset 2025/06/04 02:59:15 Primary: init-deploy-mysql-0.init-deploy-mysql.kuttl-test-welcome-marmoset Replicas: [init-deploy-mysql-1.init-deploy-mysql.kuttl-test-welcome-marmoset init-deploy-mysql-2.init-deploy-mysql.kuttl-test-welcome-marmoset] 2025/06/04 02:59:15 lookup init-deploy-mysql-2 [10.139.8.27] 2025/06/04 02:59:15 PodIP: 10.139.8.27 2025/06/04 02:59:15 lookup init-deploy-mysql-0.init-deploy-mysql.kuttl-test-welcome-marmoset [10.139.10.11] 2025/06/04 02:59:15 PrimaryIP: 10.139.10.11 2025/06/04 02:59:15 Donor: init-deploy-mysql-1.init-deploy-mysql.kuttl-test-welcome-marmoset 2025/06/04 02:59:15 Opening connection to 10.139.8.27 2025/06/04 02:59:15 Clone required: true 2025/06/04 02:59:15 Checking if a clone in progress 2025/06/04 02:59:15 Clone in progress: false 2025/06/04 02:59:15 Cloning from init-deploy-mysql-1.init-deploy-mysql.kuttl-test-welcome-marmoset 2025/06/04 02:59:16 Clone finished. Restarting container... kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:59:16 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 02:59:20 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 112ms (112ms including waiting). Image size: 436542924 bytes. 
kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:00:32 +0000 UTC Warning PerconaServerMySQL.ps.percona.com init-deploy AsyncReplicationNotReady init-deploy-mysql-2: [not_replicating] ps-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:07 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.containers{haproxy} Killing Stopping container haproxy kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:07 +0000 UTC Normal Pod init-deploy-haproxy-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:07 +0000 UTC Normal StatefulSet.apps init-deploy-haproxy SuccessfulDelete delete Pod init-deploy-haproxy-1 in StatefulSet init-deploy-haproxy successful statefulset-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:07 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:07 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:07 +0000 UTC Normal Pod init-deploy-mysql-2.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:07 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{orc} Killing Stopping container orc kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:07 +0000 UTC Normal Pod init-deploy-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:08 +0000 UTC Warning Pod init-deploy-haproxy-1 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:08 +0000 UTC Normal Pod init-deploy-haproxy-1 NotTriggerScaleUp pod didn't trigger scale-up: cluster-autoscaler logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:09 +0000 UTC Warning Pod init-deploy-haproxy-1 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:09 +0000 UTC Warning PerconaServerMySQL.ps.percona.com init-deploy AsyncReplicationNotReady init-deploy-mysql-2: [last_check_invalid] ps-controller logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:11 +0000 UTC Warning Pod init-deploy-mysql-2 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. 
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:11 +0000 UTC Normal Pod init-deploy-mysql-2 NotTriggerScaleUp pod didn't trigger scale-up: cluster-autoscaler
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:11 +0000 UTC Normal StatefulSet.apps init-deploy-mysql SuccessfulDelete delete Pod init-deploy-mysql-2 in StatefulSet init-deploy-mysql successful statefulset-controller
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:13 +0000 UTC Warning Pod init-deploy-mysql-2 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:24 +0000 UTC Normal Pod init-deploy-orc-1 TaintManagerEviction Cancelling deletion of Pod kuttl-test-welcome-marmoset/init-deploy-orc-1 taint-eviction-controller
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:24 +0000 UTC Normal StatefulSet.apps init-deploy-orc SuccessfulDelete delete Pod init-deploy-orc-1 in StatefulSet init-deploy-orc successful statefulset-controller
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:25 +0000 UTC Warning Pod init-deploy-orc-1 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:25 +0000 UTC Normal Pod init-deploy-orc-1 NotTriggerScaleUp pod didn't trigger scale-up: cluster-autoscaler
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:26 +0000 UTC Warning Pod init-deploy-orc-1 Scheduling FailedScheduling 0/3 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }, 2 node(s) didn't match pod anti-affinity rules. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod. default-scheduler
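The FailedScheduling warnings above all report the same pair of constraints: one of the three nodes carries the node.kubernetes.io/not-ready taint and the other two already host anti-affinity peers, so the deleted pods cannot land anywhere until the tainted node recovers. A minimal diagnostic sketch using only standard kubectl subcommands (the <node-name> placeholder is hypothetical):

    kubectl get nodes -o wide                                 # which of the 3 nodes is NotReady
    kubectl describe node <node-name> | grep -A2 Taints       # confirm the not-ready taint
    kubectl -n kuttl-test-welcome-marmoset get pods -o wide   # see how the anti-affinity peers are spread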
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:34 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:34 +0000 UTC Normal Pod init-deploy-haproxy-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:34 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:34 +0000 UTC Normal Pod init-deploy-haproxy-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:35 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:35 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:35 +0000 UTC Normal Pod init-deploy-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:35 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:35 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:35 +0000 UTC Normal Pod init-deploy-mysql-1.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:35 +0000 UTC Normal Pod init-deploy-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:35 +0000 UTC Normal Pod init-deploy-orc-0.spec.containers{orc} Killing Stopping container orc kubelet
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:35 +0000 UTC Normal Pod init-deploy-orc-2.spec.containers{orc} Killing Stopping container orc kubelet
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:35 +0000 UTC Normal Pod init-deploy-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:36 +0000 UTC Warning Pod init-deploy-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/06/04 03:01:36 MySQL state is not ready... kubelet
logger.go:42: 03:01:44 | init-deploy | 2025-06-04 03:01:41 +0000 UTC Warning Pod init-deploy-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/06/04 03:01:41 MySQL state is not ready... kubelet
logger.go:42: 03:01:44 | init-deploy | Deleting namespace: kuttl-test-welcome-marmoset
=== NAME kuttl
    harness.go:407: run tests finished
    harness.go:515: cleaning up
    harness.go:572: removing temp folder: ""
--- PASS: kuttl (381.25s)
    --- PASS: kuttl/harness (0.00s)
        --- PASS: kuttl/harness/init-deploy (380.81s)
PASS
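For reference, a hedged sketch (not taken from this log; the exact invocation and working directory are assumptions) of how a single case from this suite is typically re-run with the kuttl plugin:

    # run only the init-deploy test from the e2e-tests/tests suite
    kubectl kuttl test e2e-tests/tests --test init-deploy --timeout 180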