=== RUN kuttl
harness.go:459: starting setup
harness.go:254: running tests using configured kubeconfig.
harness.go:277: Successful connection to cluster at: https://34.58.193.30
harness.go:362: running tests
harness.go:74: going to run test suite with timeout of 180 seconds for each step
harness.go:374: testsuite: e2e-tests/tests has 38 tests
=== RUN kuttl/harness
=== RUN kuttl/harness/recreate
=== PAUSE kuttl/harness/recreate
=== CONT kuttl/harness/recreate
logger.go:42: 20:30:01 | recreate | Creating namespace: kuttl-test-valid-filly
logger.go:42: 20:30:02 | recreate/0-deploy-operator | starting test step 0-deploy-operator
logger.go:42: 20:30:02 | recreate/0-deploy-operator | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
init_temp_dir # do this only in the first TestStep
deploy_operator
deploy_non_tls_cluster_secrets
deploy_tls_cluster_secrets
deploy_client]
logger.go:42: 20:30:02 | recreate/0-deploy-operator | + source ../../functions
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ realpath ../../..
logger.go:42: 20:30:02 | recreate/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993
logger.go:42: 20:30:02 | recreate/0-deploy-operator | ++++ pwd
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/tests/recreate
logger.go:42: 20:30:02 | recreate/0-deploy-operator | ++ test_name=recreate
logger.go:42: 20:30:02 | recreate/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/vars.sh
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/recreate
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/recreate
logger.go:42: 20:30:02 | recreate/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ export GIT_BRANCH=PR-993
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ GIT_BRANCH=PR-993
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ export VERSION=PR-993-bd4cc52b
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ VERSION=PR-993-bd4cc52b
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-993-bd4cc52b
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-993-bd4cc52b
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ export MINIO_VER=5.4.0
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ MINIO_VER=5.4.0
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ export VAULT_VER=0.16.1
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ VAULT_VER=0.16.1
logger.go:42: 20:30:02 | recreate/0-deploy-operator | ++++ which gdate
logger.go:42: 20:30:02 | recreate/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-993/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 20:30:02 | recreate/0-deploy-operator | ++++ which date
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ date=/usr/sbin/date
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ oc get projects
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ :
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ kubectl get nodes
logger.go:42: 20:30:02 | recreate/0-deploy-operator | +++ grep '^minikube'
logger.go:42: 20:30:02 | recreate/0-deploy-operator | ++ oc get projects
logger.go:42: 20:30:02 | recreate/0-deploy-operator | + init_temp_dir
logger.go:42: 20:30:02 | recreate/0-deploy-operator | + rm -rf /tmp/kuttl/ps/recreate
logger.go:42: 20:30:02 | recreate/0-deploy-operator | + mkdir -p /tmp/kuttl/ps/recreate
logger.go:42: 20:30:02 | recreate/0-deploy-operator | + deploy_operator
logger.go:42: 20:30:02 | recreate/0-deploy-operator | + destroy_operator
logger.go:42: 20:30:02 | recreate/0-deploy-operator | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
logger.go:42: 20:30:03 | recreate/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 20:30:03 | recreate/0-deploy-operator | Error from server (NotFound): deployments.apps "percona-server-mysql-operator" not found
logger.go:42: 20:30:03 | recreate/0-deploy-operator | + true
logger.go:42: 20:30:03 | recreate/0-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 20:30:03 | recreate/0-deploy-operator | + kubectl delete namespace ps-operator --force --grace-period=0
logger.go:42: 20:30:03 | recreate/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 20:30:03 | recreate/0-deploy-operator | Error from server (NotFound): namespaces "ps-operator" not found
logger.go:42: 20:30:03 | recreate/0-deploy-operator | + true
logger.go:42: 20:30:03 | recreate/0-deploy-operator | + [[ -n ps-operator ]]
logger.go:42: 20:30:03 | recreate/0-deploy-operator | + create_namespace ps-operator
logger.go:42: 20:30:03 | recreate/0-deploy-operator | + local namespace=ps-operator
logger.go:42: 20:30:03 | recreate/0-deploy-operator | + [[ -n '' ]]
logger.go:42: 20:30:03 | recreate/0-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found
logger.go:42: 20:30:04 | recreate/0-deploy-operator | + kubectl wait --for=delete namespace ps-operator
logger.go:42: 20:30:04 | recreate/0-deploy-operator | + kubectl create namespace ps-operator
logger.go:42: 20:30:05 | recreate/0-deploy-operator | namespace/ps-operator created
logger.go:42: 20:30:05 | recreate/0-deploy-operator | + kubectl -n ps-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy/crd.yaml
logger.go:42: 20:30:06 | recreate/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
logger.go:42: 20:30:06 | recreate/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
logger.go:42: 20:30:08 | recreate/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
logger.go:42: 20:30:08 | recreate/0-deploy-operator | + '[' -n ps-operator ']'
logger.go:42: 20:30:08 | recreate/0-deploy-operator | + kubectl -n ps-operator apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy/cw-rbac.yaml
logger.go:42: 20:30:09 | recreate/0-deploy-operator | serviceaccount/percona-server-mysql-operator created
logger.go:42: 20:30:09 | recreate/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 20:30:09 | recreate/0-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 20:30:10 | recreate/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 20:30:10 | recreate/0-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
logger.go:42: 20:30:10 | recreate/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"'
logger.go:42: 20:30:10 | recreate/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"'
logger.go:42: 20:30:10 | recreate/0-deploy-operator | + kubectl -n ps-operator apply -f -
logger.go:42: 20:30:10 | recreate/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-993-bd4cc52b
logger.go:42: 20:30:10 | recreate/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-993-bd4cc52b"' /mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy/cw-operator.yaml
logger.go:42: 20:30:11 | recreate/0-deploy-operator | configmap/percona-server-mysql-operator-config created
logger.go:42: 20:30:11 | recreate/0-deploy-operator | deployment.apps/percona-server-mysql-operator created
logger.go:42: 20:30:11 | recreate/0-deploy-operator | + deploy_non_tls_cluster_secrets
logger.go:42: 20:30:11 | recreate/0-deploy-operator | + kubectl -n kuttl-test-valid-filly apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf/secrets.yaml
logger.go:42: 20:30:12 | recreate/0-deploy-operator | secret/test-secrets created
logger.go:42: 20:30:12 | recreate/0-deploy-operator | + deploy_tls_cluster_secrets
logger.go:42: 20:30:12 | recreate/0-deploy-operator | + kubectl -n kuttl-test-valid-filly apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf/ssl-secret.yaml
logger.go:42: 20:30:13 | recreate/0-deploy-operator | secret/test-ssl created
logger.go:42: 20:30:13 | recreate/0-deploy-operator | + deploy_client
logger.go:42: 20:30:13 | recreate/0-deploy-operator | + kubectl -n kuttl-test-valid-filly apply -f -
logger.go:42: 20:30:13 | recreate/0-deploy-operator | ++ printf '.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 20:30:13 | recreate/0-deploy-operator | + yq eval '.spec.containers[0].image="perconalab/percona-server-mysql-operator:main-psmysql8.0"' /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf/client.yaml
logger.go:42: 20:30:14 | recreate/0-deploy-operator | pod/mysql-client created
logger.go:42: 20:30:15 | recreate/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 20:30:15 | recreate/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 20:30:15 | recreate/0-deploy-operator | ASSERT FAIL Resource(s) not found.
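The kubectl-assert invocations above and below are retried by the harness every couple of seconds until the operator Deployment reports a ready replica. As a rough illustration only (wait_operator_ready and its retry budget are invented for this sketch, not part of the test suite), the same readiness gate could be written as a plain polling loop:

# Hypothetical equivalent of the kubectl-assert retries: poll the operator
# Deployment until .status.readyReplicas reaches 1, or give up after 30 tries.
wait_operator_ready() {
    local ns="${OPERATOR_NS:-$NAMESPACE}"
    for attempt in $(seq 1 30); do
        ready=$(kubectl -n "$ns" get deployment percona-server-mysql-operator \
            -o jsonpath='{.status.readyReplicas}' 2>/dev/null)
        [[ "$ready" == "1" ]] && return 0
        sleep 2   # the log shows roughly 2-second gaps between attempts
    done
    return 1
}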
logger.go:42: 20:30:17 | recreate/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 20:30:17 | recreate/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 20:30:17 | recreate/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 20:30:19 | recreate/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 20:30:19 | recreate/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 20:30:19 | recreate/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 20:30:21 | recreate/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 20:30:21 | recreate/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 20:30:21 | recreate/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 20:30:23 | recreate/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 20:30:23 | recreate/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 20:30:23 | recreate/0-deploy-operator | INFO Found 1 resource(s).
logger.go:42: 20:30:23 | recreate/0-deploy-operator | NAME NAMESPACE COL0
logger.go:42: 20:30:23 | recreate/0-deploy-operator | percona-server-mysql-operator ps-operator 1
logger.go:42: 20:30:23 | recreate/0-deploy-operator | ASSERT PASS
logger.go:42: 20:30:23 | recreate/0-deploy-operator | test step completed 0-deploy-operator
logger.go:42: 20:30:23 | recreate/1-create-cluster | starting test step 1-create-cluster
logger.go:42: 20:30:23 | recreate/1-create-cluster | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
get_cr \
  | yq eval '.spec.mysql.clusterType="async"' - \
  | yq eval '.spec.mysql.size=3' - \
  | yq eval '.spec.proxy.haproxy.enabled=true' - \
  | yq eval '.spec.proxy.haproxy.size=3' - \
  | yq eval '.spec.orchestrator.enabled=true' - \
  | yq eval '.spec.orchestrator.size=3' - \
  | kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 20:30:23 | recreate/1-create-cluster | + source ../../functions
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ realpath ../../..
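The xtrace that follows also shows how get_cr (from e2e-tests/functions) assembles the custom resource before the step's own overrides run: it starts from deploy/cr.yaml and layers name, secret, and image settings with yq. A minimal sketch reconstructed from the trace, not the actual helper:

# Reconstructed from the xtrace for illustration; the real get_cr sets more
# fields (gracePeriod, initContainer.image, router/pmm/toolkit images, etc.).
get_cr() {
    yq eval '.metadata.name="recreate"' "${DEPLOY_DIR}/cr.yaml" \
        | yq eval '.spec.secretsName="test-secrets"' - \
        | yq eval '.spec.sslSecretName="test-ssl"' - \
        | yq eval '.spec.upgradeOptions.apply="disabled"' - \
        | yq eval ".spec.mysql.image=\"${IMAGE_MYSQL}\"" - \
        | yq eval ".spec.orchestrator.image=\"${IMAGE_ORCHESTRATOR}\"" - \
        | yq eval ".spec.proxy.haproxy.image=\"${IMAGE_HAPROXY}\"" - \
        | yq eval ".spec.backup.image=\"${IMAGE_BACKUP}\"" -
}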
logger.go:42: 20:30:23 | recreate/1-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993
logger.go:42: 20:30:23 | recreate/1-create-cluster | ++++ pwd
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/tests/recreate
logger.go:42: 20:30:23 | recreate/1-create-cluster | ++ test_name=recreate
logger.go:42: 20:30:23 | recreate/1-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/vars.sh
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/recreate
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/recreate
logger.go:42: 20:30:23 | recreate/1-create-cluster | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ export GIT_BRANCH=PR-993
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ GIT_BRANCH=PR-993
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ export VERSION=PR-993-bd4cc52b
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ VERSION=PR-993-bd4cc52b
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-993-bd4cc52b
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-993-bd4cc52b
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ export MINIO_VER=5.4.0
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ MINIO_VER=5.4.0
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ export VAULT_VER=0.16.1
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ VAULT_VER=0.16.1
logger.go:42: 20:30:23 | recreate/1-create-cluster | ++++ which gdate
logger.go:42: 20:30:23 | recreate/1-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-993/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 20:30:23 | recreate/1-create-cluster | ++++ which date
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ date=/usr/sbin/date
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ oc get projects
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ :
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ kubectl get nodes
logger.go:42: 20:30:23 | recreate/1-create-cluster | +++ grep '^minikube'
logger.go:42: 20:30:24 | recreate/1-create-cluster | ++ oc get projects
logger.go:42: 20:30:24 | recreate/1-create-cluster | + get_cr
logger.go:42: 20:30:24 | recreate/1-create-cluster | + local name_suffix=
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval .spec.mysql.size=3 -
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval .spec.proxy.haproxy.enabled=true -
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval .spec.proxy.haproxy.size=3 -
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval .spec.orchestrator.enabled=true -
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval .spec.orchestrator.size=3 -
logger.go:42: 20:30:24 | recreate/1-create-cluster | ++ printf '.metadata.name="%s"' recreate
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval '.spec.secretsName="test-secrets"' -
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' -
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval '.metadata.name="recreate"' /mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy/cr.yaml
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' -
logger.go:42: 20:30:24 | recreate/1-create-cluster | + kubectl -n kuttl-test-valid-filly apply -f -
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 20:30:24 | recreate/1-create-cluster | ++ printf '.spec.initContainer.image="%s"' perconalab/percona-server-mysql-operator:PR-993-bd4cc52b
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval '.spec.initContainer.image="perconalab/percona-server-mysql-operator:PR-993-bd4cc52b"' -
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval .spec.orchestrator.enabled=true -
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval .spec.mysql.gracePeriod=30 -
logger.go:42: 20:30:24 | recreate/1-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 20:30:24 | recreate/1-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 20:30:24 | recreate/1-create-cluster | + '[' -n '' ']'
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval -
logger.go:42: 20:30:24 | recreate/1-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 20:30:24 | recreate/1-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 20:30:24 | recreate/1-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:3-dev-latest
logger.go:42: 20:30:24 | recreate/1-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
logger.go:42: 20:30:24 | recreate/1-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup8.0"' -
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router8.0"' -
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:3-dev-latest"' -
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql8.0"' -
logger.go:42: 20:30:24 | recreate/1-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
logger.go:42: 20:30:25 | recreate/1-create-cluster | perconaservermysql.ps.percona.com/recreate created
logger.go:42: 20:33:39 | recreate/1-create-cluster | test step completed 1-create-cluster
logger.go:42: 20:33:39 | recreate/2-write-data | starting test step 2-write-data
logger.go:42: 20:33:39 | recreate/2-write-data | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
run_mysql \
  "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" \
  "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password"
run_mysql \
  "INSERT myDB.myTable (id) VALUES (100500)" \
  "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password"
for i in 0 1 2; do
  host=$(get_mysql_headless_fqdn $(get_cluster_name) $i)
  data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${host} -uroot -proot_password")
  kubectl create configmap -n "${NAMESPACE}" 02-write-data-${i} --from-literal=data="${data}"
done]
logger.go:42: 20:33:39 | recreate/2-write-data | + source ../../functions
logger.go:42: 20:33:39 | recreate/2-write-data | +++ realpath ../../..
logger.go:42: 20:33:39 | recreate/2-write-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993
logger.go:42: 20:33:39 | recreate/2-write-data | ++++ pwd
logger.go:42: 20:33:39 | recreate/2-write-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/tests/recreate
logger.go:42: 20:33:39 | recreate/2-write-data | ++ test_name=recreate
logger.go:42: 20:33:39 | recreate/2-write-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/vars.sh
logger.go:42: 20:33:39 | recreate/2-write-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993
logger.go:42: 20:33:39 | recreate/2-write-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993
logger.go:42: 20:33:39 | recreate/2-write-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy
logger.go:42: 20:33:39 | recreate/2-write-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy
logger.go:42: 20:33:39 | recreate/2-write-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests
logger.go:42: 20:33:39 | recreate/2-write-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests
logger.go:42: 20:33:39 | recreate/2-write-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf
logger.go:42: 20:33:39 | recreate/2-write-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf
logger.go:42: 20:33:39 | recreate/2-write-data | +++ export TEMP_DIR=/tmp/kuttl/ps/recreate
logger.go:42: 20:33:39 | recreate/2-write-data | +++ TEMP_DIR=/tmp/kuttl/ps/recreate
logger.go:42: 20:33:39 | recreate/2-write-data | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 20:33:39 | recreate/2-write-data | +++ export GIT_BRANCH=PR-993
logger.go:42: 20:33:39 | recreate/2-write-data | +++ GIT_BRANCH=PR-993
logger.go:42: 20:33:39 | recreate/2-write-data | +++ export VERSION=PR-993-bd4cc52b
logger.go:42: 20:33:39 | recreate/2-write-data | +++ VERSION=PR-993-bd4cc52b
logger.go:42: 20:33:39 | recreate/2-write-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-993-bd4cc52b
logger.go:42: 20:33:39 | recreate/2-write-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-993-bd4cc52b
logger.go:42: 20:33:39 | recreate/2-write-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 20:33:39 | recreate/2-write-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 20:33:39 | recreate/2-write-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 20:33:39 | recreate/2-write-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 20:33:39 | recreate/2-write-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 20:33:39 | recreate/2-write-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 20:33:39 | recreate/2-write-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 20:33:39 | recreate/2-write-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 20:33:39 | recreate/2-write-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 20:33:39 | recreate/2-write-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 20:33:39 | recreate/2-write-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 20:33:39 | recreate/2-write-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 20:33:39 | recreate/2-write-data | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 20:33:39 | recreate/2-write-data | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 20:33:39 | recreate/2-write-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 20:33:39 | recreate/2-write-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 20:33:39 | recreate/2-write-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 20:33:39 | recreate/2-write-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 20:33:39 | recreate/2-write-data | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 20:33:39 | recreate/2-write-data | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 20:33:39 | recreate/2-write-data | +++ export MINIO_VER=5.4.0
logger.go:42: 20:33:39 | recreate/2-write-data | +++ MINIO_VER=5.4.0
logger.go:42: 20:33:39 | recreate/2-write-data | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 20:33:39 | recreate/2-write-data | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 20:33:39 | recreate/2-write-data | +++ export VAULT_VER=0.16.1
logger.go:42: 20:33:39 | recreate/2-write-data | +++ VAULT_VER=0.16.1
logger.go:42: 20:33:39 | recreate/2-write-data | ++++ which gdate
logger.go:42: 20:33:39 | recreate/2-write-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-993/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 20:33:39 | recreate/2-write-data | ++++ which date
logger.go:42: 20:33:39 | recreate/2-write-data | +++ date=/usr/sbin/date
logger.go:42: 20:33:39 | recreate/2-write-data | +++ oc get projects
logger.go:42: 20:33:39 | recreate/2-write-data | +++ :
logger.go:42: 20:33:39 | recreate/2-write-data | +++ kubectl get nodes
logger.go:42: 20:33:39 | recreate/2-write-data | +++ grep '^minikube'
logger.go:42: 20:33:39 | recreate/2-write-data | ++ oc get projects
logger.go:42: 20:33:39 | recreate/2-write-data | +++ get_cluster_name
logger.go:42: 20:33:39 | recreate/2-write-data | +++ kubectl -n kuttl-test-valid-filly get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 20:33:40 | recreate/2-write-data | ++ get_haproxy_svc recreate
logger.go:42: 20:33:40 | recreate/2-write-data | ++ local cluster=recreate
logger.go:42: 20:33:40 | recreate/2-write-data | ++ echo recreate-haproxy
logger.go:42: 20:33:40 | recreate/2-write-data | + run_mysql 'CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' '-h recreate-haproxy -uroot -proot_password'
logger.go:42: 20:33:40 | recreate/2-write-data | + local 'command=CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)'
logger.go:42: 20:33:40 | recreate/2-write-data | + local 'uri=-h recreate-haproxy -uroot -proot_password'
logger.go:42: 20:33:40 | recreate/2-write-data | + local pod=
logger.go:42: 20:33:40 | recreate/2-write-data | ++ get_client_pod
logger.go:42: 20:33:40 | recreate/2-write-data | ++ kubectl -n kuttl-test-valid-filly get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 20:33:40 | recreate/2-write-data | + client_pod=mysql-client
logger.go:42: 20:33:40 | recreate/2-write-data | + wait_pod mysql-client
logger.go:42: 20:33:40 | recreate/2-write-data | + local pod=mysql-client
logger.go:42: 20:33:40 | recreate/2-write-data | + set +o xtrace
logger.go:42: 20:33:41 | recreate/2-write-data | mysql-clienttrue
logger.go:42: 20:33:41 | recreate/2-write-data | + kubectl -n kuttl-test-valid-filly exec mysql-client -- bash -c 'printf '\''%s\n'\'' "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" | mysql -sN -h recreate-haproxy -uroot -proot_password'
logger.go:42: 20:33:41 | recreate/2-write-data | + sed -e 's/mysql: //'
logger.go:42: 20:33:41 | recreate/2-write-data | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 20:33:42 | recreate/2-write-data | + :
logger.go:42: 20:33:42 | recreate/2-write-data | +++ get_cluster_name
logger.go:42: 20:33:42 | recreate/2-write-data | +++ kubectl -n kuttl-test-valid-filly get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 20:33:42 | recreate/2-write-data | ++ get_haproxy_svc recreate
logger.go:42: 20:33:42 | recreate/2-write-data | ++ local cluster=recreate
logger.go:42: 20:33:42 | recreate/2-write-data | ++ echo recreate-haproxy
logger.go:42: 20:33:42 | recreate/2-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100500)' '-h recreate-haproxy -uroot -proot_password'
logger.go:42: 20:33:42 | recreate/2-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100500)'
logger.go:42: 20:33:42 | recreate/2-write-data | + local 'uri=-h recreate-haproxy -uroot -proot_password'
logger.go:42: 20:33:42 | recreate/2-write-data | + local pod=
logger.go:42: 20:33:42 | recreate/2-write-data | ++ get_client_pod
logger.go:42: 20:33:42 | recreate/2-write-data | ++ kubectl -n kuttl-test-valid-filly get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 20:33:42 | recreate/2-write-data | + client_pod=mysql-client
logger.go:42: 20:33:42 | recreate/2-write-data | + wait_pod mysql-client
logger.go:42: 20:33:42 | recreate/2-write-data | + local pod=mysql-client
logger.go:42: 20:33:42 | recreate/2-write-data | + set +o xtrace
logger.go:42: 20:33:43 | recreate/2-write-data | mysql-clienttrue
logger.go:42: 20:33:43 | recreate/2-write-data | + kubectl -n kuttl-test-valid-filly exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100500)" | mysql -sN -h recreate-haproxy -uroot -proot_password'
logger.go:42: 20:33:43 | recreate/2-write-data | + sed -e 's/mysql: //'
logger.go:42: 20:33:43 | recreate/2-write-data | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 20:33:44 | recreate/2-write-data | + :
logger.go:42: 20:33:44 | recreate/2-write-data | + for i in 0 1 2
logger.go:42: 20:33:44 | recreate/2-write-data | +++ get_cluster_name
logger.go:42: 20:33:44 | recreate/2-write-data | +++ kubectl -n kuttl-test-valid-filly get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 20:33:44 | recreate/2-write-data | ++ get_mysql_headless_fqdn recreate 0
logger.go:42: 20:33:44 | recreate/2-write-data | ++ local cluster=recreate
logger.go:42: 20:33:44 | recreate/2-write-data | ++ local index=0
logger.go:42: 20:33:44 | recreate/2-write-data | ++ echo recreate-mysql-0.recreate-mysql
logger.go:42: 20:33:44 | recreate/2-write-data | + host=recreate-mysql-0.recreate-mysql
logger.go:42: 20:33:44 | recreate/2-write-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h recreate-mysql-0.recreate-mysql -uroot -proot_password'
logger.go:42: 20:33:44 | recreate/2-write-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 20:33:44 | recreate/2-write-data | ++ local 'uri=-h recreate-mysql-0.recreate-mysql -uroot -proot_password'
logger.go:42: 20:33:44 | recreate/2-write-data | ++ local pod=
logger.go:42: 20:33:44 | recreate/2-write-data | +++ get_client_pod
logger.go:42: 20:33:44 | recreate/2-write-data | +++ kubectl -n kuttl-test-valid-filly get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 20:33:45 | recreate/2-write-data | ++ client_pod=mysql-client
logger.go:42: 20:33:45 | recreate/2-write-data | ++ wait_pod mysql-client
logger.go:42: 20:33:45 | recreate/2-write-data | ++ local pod=mysql-client
logger.go:42: 20:33:45 | recreate/2-write-data | ++ set +o xtrace
logger.go:42: 20:33:45 | recreate/2-write-data | mysql-clienttrue
logger.go:42: 20:33:45 | recreate/2-write-data | ++ kubectl -n kuttl-test-valid-filly exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h recreate-mysql-0.recreate-mysql -uroot -proot_password'
logger.go:42: 20:33:45 | recreate/2-write-data | ++ sed -e 's/mysql: //'
logger.go:42: 20:33:45 | recreate/2-write-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 20:33:46 | recreate/2-write-data | + data=100500
logger.go:42: 20:33:46 | recreate/2-write-data | + kubectl create configmap -n kuttl-test-valid-filly 02-write-data-0 --from-literal=data=100500
logger.go:42: 20:33:46 | recreate/2-write-data | configmap/02-write-data-0 created
logger.go:42: 20:33:46 | recreate/2-write-data | + for i in 0 1 2
logger.go:42: 20:33:46 | recreate/2-write-data | +++ get_cluster_name
logger.go:42: 20:33:46 | recreate/2-write-data | +++ kubectl -n kuttl-test-valid-filly get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 20:33:47 | recreate/2-write-data | ++ get_mysql_headless_fqdn recreate 1
logger.go:42: 20:33:47 | recreate/2-write-data | ++ local cluster=recreate
logger.go:42: 20:33:47 | recreate/2-write-data | ++ local index=1
logger.go:42: 20:33:47 | recreate/2-write-data | ++ echo recreate-mysql-1.recreate-mysql
logger.go:42: 20:33:47 | recreate/2-write-data | + host=recreate-mysql-1.recreate-mysql
logger.go:42: 20:33:47 | recreate/2-write-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h recreate-mysql-1.recreate-mysql -uroot -proot_password'
logger.go:42: 20:33:47 | recreate/2-write-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 20:33:47 | recreate/2-write-data | ++ local 'uri=-h recreate-mysql-1.recreate-mysql -uroot -proot_password'
logger.go:42: 20:33:47 | recreate/2-write-data | ++ local pod=
logger.go:42: 20:33:47 | recreate/2-write-data | +++ get_client_pod
logger.go:42: 20:33:47 | recreate/2-write-data | +++ kubectl -n kuttl-test-valid-filly get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 20:33:47 | recreate/2-write-data | ++ client_pod=mysql-client
logger.go:42: 20:33:47 | recreate/2-write-data | ++ wait_pod mysql-client
logger.go:42: 20:33:47 | recreate/2-write-data | ++ local pod=mysql-client
logger.go:42: 20:33:47 | recreate/2-write-data | ++ set +o xtrace
logger.go:42: 20:33:48 | recreate/2-write-data | mysql-clienttrue
logger.go:42: 20:33:48 | recreate/2-write-data | ++ kubectl -n kuttl-test-valid-filly exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h recreate-mysql-1.recreate-mysql -uroot -proot_password'
logger.go:42: 20:33:48 | recreate/2-write-data | ++ sed -e 's/mysql: //'
logger.go:42: 20:33:48 | recreate/2-write-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 20:33:49 | recreate/2-write-data | + data=100500
logger.go:42: 20:33:49 | recreate/2-write-data | + kubectl create configmap -n kuttl-test-valid-filly 02-write-data-1 --from-literal=data=100500
logger.go:42: 20:33:49 | recreate/2-write-data | configmap/02-write-data-1 created
logger.go:42: 20:33:49 | recreate/2-write-data | + for i in 0 1 2
logger.go:42: 20:33:49 | recreate/2-write-data | +++ get_cluster_name
logger.go:42: 20:33:49 | recreate/2-write-data | +++ kubectl -n kuttl-test-valid-filly get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 20:33:50 | recreate/2-write-data | ++ get_mysql_headless_fqdn recreate 2
logger.go:42: 20:33:50 | recreate/2-write-data | ++ local cluster=recreate
logger.go:42: 20:33:50 | recreate/2-write-data | ++ local index=2
logger.go:42: 20:33:50 | recreate/2-write-data | ++ echo recreate-mysql-2.recreate-mysql
logger.go:42: 20:33:50 | recreate/2-write-data | + host=recreate-mysql-2.recreate-mysql
logger.go:42: 20:33:50 | recreate/2-write-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h recreate-mysql-2.recreate-mysql -uroot -proot_password'
logger.go:42: 20:33:50 | recreate/2-write-data | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 20:33:50 | recreate/2-write-data | ++ local 'uri=-h recreate-mysql-2.recreate-mysql -uroot -proot_password'
logger.go:42: 20:33:50 | recreate/2-write-data | ++ local pod=
logger.go:42: 20:33:50 | recreate/2-write-data | +++ get_client_pod
logger.go:42: 20:33:50 | recreate/2-write-data | +++ kubectl -n kuttl-test-valid-filly get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 20:33:50 | recreate/2-write-data | ++ client_pod=mysql-client
logger.go:42: 20:33:50 | recreate/2-write-data | ++ wait_pod mysql-client
logger.go:42: 20:33:50 | recreate/2-write-data | ++ local pod=mysql-client
logger.go:42: 20:33:50 | recreate/2-write-data | ++ set +o xtrace
logger.go:42: 20:33:50 | recreate/2-write-data | mysql-clienttrue
logger.go:42: 20:33:50 | recreate/2-write-data | ++ kubectl -n kuttl-test-valid-filly exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h recreate-mysql-2.recreate-mysql -uroot -proot_password'
logger.go:42: 20:33:50 | recreate/2-write-data | ++ sed -e 's/mysql: //'
logger.go:42: 20:33:50 | recreate/2-write-data | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 20:33:51 | recreate/2-write-data | + data=100500
logger.go:42: 20:33:51 | recreate/2-write-data | + kubectl create configmap -n kuttl-test-valid-filly 02-write-data-2 --from-literal=data=100500
logger.go:42: 20:33:52 | recreate/2-write-data | configmap/02-write-data-2 created
logger.go:42: 20:33:53 | recreate/2-write-data | test step completed 2-write-data
logger.go:42: 20:33:53 | recreate/3-pause | starting test step 3-pause
logger.go:42: 20:33:53 | recreate/3-pause | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
get_cr \
  | yq eval '.spec.pause=true' - \
  | yq eval '.spec.mysql.clusterType="async"' - \
  | yq eval '.spec.mysql.size=3' - \
  | yq eval '.spec.proxy.haproxy.enabled=true' - \
  | yq eval '.spec.proxy.haproxy.size=3' - \
  | yq eval '.spec.orchestrator.enabled=true' - \
  | yq eval '.spec.orchestrator.size=3' - \
  | kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 20:33:53 | recreate/3-pause | + source ../../functions
logger.go:42: 20:33:53 | recreate/3-pause | +++ realpath ../../..
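Step 3 pauses the cluster by re-applying the full CR with .spec.pause=true through the same get_cr pipeline. For illustration only (this is not what the test does), the same toggle could be made with a targeted merge patch against the ps resource:

# Illustrative alternative to rebuilding the whole CR: flip only the pause flag.
kubectl -n "${NAMESPACE}" patch ps recreate \
    --type=merge -p '{"spec":{"pause":true}}'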
logger.go:42: 20:33:53 | recreate/3-pause | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993
logger.go:42: 20:33:53 | recreate/3-pause | ++++ pwd
logger.go:42: 20:33:53 | recreate/3-pause | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/tests/recreate
logger.go:42: 20:33:53 | recreate/3-pause | ++ test_name=recreate
logger.go:42: 20:33:53 | recreate/3-pause | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/vars.sh
logger.go:42: 20:33:53 | recreate/3-pause | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993
logger.go:42: 20:33:53 | recreate/3-pause | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993
logger.go:42: 20:33:53 | recreate/3-pause | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy
logger.go:42: 20:33:53 | recreate/3-pause | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy
logger.go:42: 20:33:53 | recreate/3-pause | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests
logger.go:42: 20:33:53 | recreate/3-pause | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests
logger.go:42: 20:33:53 | recreate/3-pause | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf
logger.go:42: 20:33:53 | recreate/3-pause | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf
logger.go:42: 20:33:53 | recreate/3-pause | +++ export TEMP_DIR=/tmp/kuttl/ps/recreate
logger.go:42: 20:33:53 | recreate/3-pause | +++ TEMP_DIR=/tmp/kuttl/ps/recreate
logger.go:42: 20:33:53 | recreate/3-pause | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 20:33:53 | recreate/3-pause | +++ export GIT_BRANCH=PR-993
logger.go:42: 20:33:53 | recreate/3-pause | +++ GIT_BRANCH=PR-993
logger.go:42: 20:33:53 | recreate/3-pause | +++ export VERSION=PR-993-bd4cc52b
logger.go:42: 20:33:53 | recreate/3-pause | +++ VERSION=PR-993-bd4cc52b
logger.go:42: 20:33:53 | recreate/3-pause | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-993-bd4cc52b
logger.go:42: 20:33:53 | recreate/3-pause | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-993-bd4cc52b
logger.go:42: 20:33:53 | recreate/3-pause | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 20:33:53 | recreate/3-pause | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 20:33:53 | recreate/3-pause | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 20:33:53 | recreate/3-pause | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 20:33:53 | recreate/3-pause | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 20:33:53 | recreate/3-pause | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 20:33:53 | recreate/3-pause | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 20:33:53 | recreate/3-pause | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 20:33:53 | recreate/3-pause | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 20:33:53 | recreate/3-pause | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 20:33:53 | recreate/3-pause | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 20:33:53 | recreate/3-pause | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 20:33:53 | recreate/3-pause | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 20:33:53 | recreate/3-pause | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 20:33:53 | recreate/3-pause | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 20:33:53 | recreate/3-pause | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 20:33:53 | recreate/3-pause | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 20:33:53 | recreate/3-pause | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 20:33:53 | recreate/3-pause | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 20:33:53 | recreate/3-pause | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 20:33:53 | recreate/3-pause | +++ export MINIO_VER=5.4.0
logger.go:42: 20:33:53 | recreate/3-pause | +++ MINIO_VER=5.4.0
logger.go:42: 20:33:53 | recreate/3-pause | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 20:33:53 | recreate/3-pause | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 20:33:53 | recreate/3-pause | +++ export VAULT_VER=0.16.1
logger.go:42: 20:33:53 | recreate/3-pause | +++ VAULT_VER=0.16.1
logger.go:42: 20:33:53 | recreate/3-pause | ++++ which gdate
logger.go:42: 20:33:53 | recreate/3-pause | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-993/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 20:33:53 | recreate/3-pause | ++++ which date
logger.go:42: 20:33:53 | recreate/3-pause | +++ date=/usr/sbin/date
logger.go:42: 20:33:53 | recreate/3-pause | +++ oc get projects
logger.go:42: 20:33:53 | recreate/3-pause | +++ :
logger.go:42: 20:33:53 | recreate/3-pause | +++ kubectl get nodes
logger.go:42: 20:33:53 | recreate/3-pause | +++ grep '^minikube'
logger.go:42: 20:33:54 | recreate/3-pause | ++ oc get projects
logger.go:42: 20:33:54 | recreate/3-pause | + get_cr
logger.go:42: 20:33:54 | recreate/3-pause | + local name_suffix=
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval .spec.pause=true -
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval .spec.mysql.size=3 -
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval '.spec.secretsName="test-secrets"' -
logger.go:42: 20:33:54 | recreate/3-pause | ++ printf '.metadata.name="%s"' recreate
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval .spec.proxy.haproxy.enabled=true -
logger.go:42: 20:33:54 | recreate/3-pause | ++ printf '.spec.initContainer.image="%s"' perconalab/percona-server-mysql-operator:PR-993-bd4cc52b
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval '.metadata.name="recreate"' /mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy/cr.yaml
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval '.spec.initContainer.image="perconalab/percona-server-mysql-operator:PR-993-bd4cc52b"' -
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval .spec.orchestrator.enabled=true -
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval '.spec.sslSecretName="test-ssl"' -
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval .spec.proxy.haproxy.size=3 -
logger.go:42: 20:33:54 | recreate/3-pause | + kubectl -n kuttl-test-valid-filly apply -f -
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval '.spec.upgradeOptions.apply="disabled"' -
logger.go:42: 20:33:54 | recreate/3-pause | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval .spec.orchestrator.size=3 -
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval .spec.mysql.gracePeriod=30 -
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval .spec.orchestrator.enabled=true -
logger.go:42: 20:33:54 | recreate/3-pause | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 20:33:54 | recreate/3-pause | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql8.0"' -
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup8.0"' -
logger.go:42: 20:33:54 | recreate/3-pause | + '[' -n '' ']'
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval -
logger.go:42: 20:33:54 | recreate/3-pause | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:3-dev-latest
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
logger.go:42: 20:33:54 | recreate/3-pause | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 20:33:54 | recreate/3-pause | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 20:33:54 | recreate/3-pause | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval '.spec.pmm.image="perconalab/pmm-client:3-dev-latest"' -
logger.go:42: 20:33:54 | recreate/3-pause | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router8.0"' -
logger.go:42: 20:33:58 | recreate/3-pause | perconaservermysql.ps.percona.com/recreate configured
logger.go:42: 20:34:36 | recreate/3-pause | test step completed 3-pause
logger.go:42: 20:34:36 | recreate/4-unpause | starting test step 4-unpause
logger.go:42: 20:34:36 | recreate/4-unpause | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
get_cr \
  | yq eval '.spec.pause=false' - \
  | yq eval '.spec.mysql.clusterType="async"' - \
  | yq eval '.spec.mysql.size=3' - \
  | yq eval '.spec.proxy.haproxy.enabled=true' - \
  | yq eval '.spec.proxy.haproxy.size=3' - \
  | yq eval '.spec.orchestrator.enabled=true' - \
  | yq eval '.spec.orchestrator.size=3' - \
  | kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 20:34:36 | recreate/4-unpause | + source ../../functions
logger.go:42: 20:34:36 | recreate/4-unpause | +++ realpath ../../..
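Step 4 re-applies the CR with .spec.pause=false and then waits for the cluster to come back (the step completes roughly three minutes after the apply). A hedged sketch of such a wait, assuming the ps resource exposes a .status.state field that reaches "ready":

# Assumes .status.state on the ps resource; adjust the JSONPath if the
# actual status schema differs.
until [[ "$(kubectl -n "${NAMESPACE}" get ps recreate \
        -o jsonpath='{.status.state}')" == "ready" ]]; do
    sleep 5
done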
logger.go:42: 20:34:36 | recreate/4-unpause | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993 logger.go:42: 20:34:36 | recreate/4-unpause | ++++ pwd logger.go:42: 20:34:36 | recreate/4-unpause | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/tests/recreate logger.go:42: 20:34:36 | recreate/4-unpause | ++ test_name=recreate logger.go:42: 20:34:36 | recreate/4-unpause | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/vars.sh logger.go:42: 20:34:36 | recreate/4-unpause | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993 logger.go:42: 20:34:36 | recreate/4-unpause | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993 logger.go:42: 20:34:36 | recreate/4-unpause | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy logger.go:42: 20:34:36 | recreate/4-unpause | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy logger.go:42: 20:34:36 | recreate/4-unpause | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests logger.go:42: 20:34:36 | recreate/4-unpause | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests logger.go:42: 20:34:36 | recreate/4-unpause | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf logger.go:42: 20:34:36 | recreate/4-unpause | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf logger.go:42: 20:34:36 | recreate/4-unpause | +++ export TEMP_DIR=/tmp/kuttl/ps/recreate logger.go:42: 20:34:36 | recreate/4-unpause | +++ TEMP_DIR=/tmp/kuttl/ps/recreate logger.go:42: 20:34:36 | recreate/4-unpause | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 20:34:36 | recreate/4-unpause | +++ export GIT_BRANCH=PR-993 logger.go:42: 20:34:36 | recreate/4-unpause | +++ GIT_BRANCH=PR-993 logger.go:42: 20:34:36 | recreate/4-unpause | +++ export VERSION=PR-993-bd4cc52b logger.go:42: 20:34:36 | recreate/4-unpause | +++ VERSION=PR-993-bd4cc52b logger.go:42: 20:34:36 | recreate/4-unpause | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-993-bd4cc52b logger.go:42: 20:34:36 | recreate/4-unpause | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-993-bd4cc52b logger.go:42: 20:34:36 | recreate/4-unpause | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 20:34:36 | recreate/4-unpause | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 20:34:36 | recreate/4-unpause | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0 logger.go:42: 20:34:36 | recreate/4-unpause | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0 logger.go:42: 20:34:36 | recreate/4-unpause | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 20:34:36 | recreate/4-unpause | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 20:34:36 | recreate/4-unpause | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0 logger.go:42: 20:34:36 | recreate/4-unpause | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0 logger.go:42: 20:34:36 | recreate/4-unpause | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 20:34:36 | recreate/4-unpause | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 20:34:36 | recreate/4-unpause | +++ export 
IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 20:34:36 | recreate/4-unpause | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 20:34:36 | recreate/4-unpause | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 20:34:36 | recreate/4-unpause | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 20:34:36 | recreate/4-unpause | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 20:34:36 | recreate/4-unpause | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 20:34:36 | recreate/4-unpause | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 20:34:36 | recreate/4-unpause | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 20:34:36 | recreate/4-unpause | +++ export CERT_MANAGER_VER=1.17.2 logger.go:42: 20:34:36 | recreate/4-unpause | +++ CERT_MANAGER_VER=1.17.2 logger.go:42: 20:34:36 | recreate/4-unpause | +++ export MINIO_VER=5.4.0 logger.go:42: 20:34:36 | recreate/4-unpause | +++ MINIO_VER=5.4.0 logger.go:42: 20:34:36 | recreate/4-unpause | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 20:34:36 | recreate/4-unpause | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 20:34:36 | recreate/4-unpause | +++ export VAULT_VER=0.16.1 logger.go:42: 20:34:36 | recreate/4-unpause | +++ VAULT_VER=0.16.1 logger.go:42: 20:34:36 | recreate/4-unpause | ++++ which gdate logger.go:42: 20:34:36 | recreate/4-unpause | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-993/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 20:34:36 | recreate/4-unpause | ++++ which date logger.go:42: 20:34:36 | recreate/4-unpause | +++ date=/usr/sbin/date logger.go:42: 20:34:36 | recreate/4-unpause | +++ oc get projects logger.go:42: 20:34:36 | recreate/4-unpause | +++ : logger.go:42: 20:34:36 | recreate/4-unpause | +++ kubectl get nodes logger.go:42: 20:34:36 | recreate/4-unpause | +++ grep '^minikube' logger.go:42: 20:34:36 | recreate/4-unpause | ++ oc get projects logger.go:42: 20:34:36 | recreate/4-unpause | + get_cr logger.go:42: 20:34:36 | recreate/4-unpause | + local name_suffix= logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval .spec.pause=false - logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval .spec.mysql.size=3 - logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval .spec.proxy.haproxy.enabled=true - logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval .spec.proxy.haproxy.size=3 - logger.go:42: 20:34:36 | recreate/4-unpause | ++ printf '.metadata.name="%s"' recreate logger.go:42: 20:34:36 | recreate/4-unpause | ++ printf '.spec.initContainer.image="%s"' perconalab/percona-server-mysql-operator:PR-993-bd4cc52b logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval '.metadata.name="recreate"' /mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy/cr.yaml logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval '.spec.upgradeOptions.apply="disabled"' - logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval '.spec.sslSecretName="test-ssl"' - logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval '.spec.initContainer.image="perconalab/percona-server-mysql-operator:PR-993-bd4cc52b"' - logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval '.spec.secretsName="test-secrets"' - logger.go:42: 20:34:36 | 
recreate/4-unpause | + yq eval .spec.mysql.gracePeriod=30 - logger.go:42: 20:34:36 | recreate/4-unpause | + kubectl -n kuttl-test-valid-filly apply -f - logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval .spec.orchestrator.enabled=true - logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval .spec.orchestrator.enabled=true - logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval .spec.orchestrator.size=3 - logger.go:42: 20:34:36 | recreate/4-unpause | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 20:34:36 | recreate/4-unpause | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:3-dev-latest logger.go:42: 20:34:36 | recreate/4-unpause | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 20:34:36 | recreate/4-unpause | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router8.0 logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' - logger.go:42: 20:34:36 | recreate/4-unpause | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup8.0 logger.go:42: 20:34:36 | recreate/4-unpause | + '[' -n '' ']' logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval - logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval '.spec.pmm.image="perconalab/pmm-client:3-dev-latest"' - logger.go:42: 20:34:36 | recreate/4-unpause | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 20:34:36 | recreate/4-unpause | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router8.0"' - logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql8.0"' - logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup8.0"' - logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' - logger.go:42: 20:34:36 | recreate/4-unpause | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' - logger.go:42: 20:34:38 | recreate/4-unpause | perconaservermysql.ps.percona.com/recreate configured logger.go:42: 20:37:40 | recreate/4-unpause | test step completed 4-unpause logger.go:42: 20:37:40 | recreate/5-write-data | starting test step 5-write-data logger.go:42: 20:37:40 | recreate/5-write-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_mysql \ "INSERT myDB.myTable (id) VALUES (100501)" \ "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password" for i in 0 1 2; do host=$(get_mysql_headless_fqdn $(get_cluster_name) $i) data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${host} -uroot -proot_password") kubectl create configmap -n "${NAMESPACE}" 06-write-data-${i} --from-literal=data="${data}" done] logger.go:42: 20:37:40 | recreate/5-write-data | + source ../../functions logger.go:42: 20:37:40 | recreate/5-write-data | +++ realpath ../../.. 
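The 5-write-data step just announced drives everything through run_mysql; its xtrace below shows what that helper expands to: an exec into the long-running mysql-client pod, a printf piped into the mysql CLI, and two filters that strip client noise from the result. A reconstruction of the helper as far as this log reveals it (the real function in e2e-tests/functions may handle quoting and pod readiness more carefully):

    run_mysql() {
        local command="$1"  # SQL text, e.g. "SELECT * FROM myDB.myTable"
        local uri="$2"      # connection flags, e.g. "-h recreate-haproxy -uroot -proot_password"

        # -sN: silent mode, no column headers; output is just the row values.
        kubectl -n "${NAMESPACE}" exec mysql-client -- bash -c \
            "printf '%s\n' \"${command}\" | mysql -sN ${uri}" \
            | sed -e 's/mysql: //' \
            | grep -v 'Using a password on the command line interface can be insecure.'
    }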
logger.go:42: 20:37:40 | recreate/5-write-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993 logger.go:42: 20:37:40 | recreate/5-write-data | ++++ pwd logger.go:42: 20:37:40 | recreate/5-write-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/tests/recreate logger.go:42: 20:37:40 | recreate/5-write-data | ++ test_name=recreate logger.go:42: 20:37:40 | recreate/5-write-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/vars.sh logger.go:42: 20:37:40 | recreate/5-write-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993 logger.go:42: 20:37:40 | recreate/5-write-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993 logger.go:42: 20:37:40 | recreate/5-write-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy logger.go:42: 20:37:40 | recreate/5-write-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy logger.go:42: 20:37:40 | recreate/5-write-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests logger.go:42: 20:37:40 | recreate/5-write-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests logger.go:42: 20:37:40 | recreate/5-write-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf logger.go:42: 20:37:40 | recreate/5-write-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf logger.go:42: 20:37:40 | recreate/5-write-data | +++ export TEMP_DIR=/tmp/kuttl/ps/recreate logger.go:42: 20:37:40 | recreate/5-write-data | +++ TEMP_DIR=/tmp/kuttl/ps/recreate logger.go:42: 20:37:40 | recreate/5-write-data | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 20:37:40 | recreate/5-write-data | +++ export GIT_BRANCH=PR-993 logger.go:42: 20:37:40 | recreate/5-write-data | +++ GIT_BRANCH=PR-993 logger.go:42: 20:37:40 | recreate/5-write-data | +++ export VERSION=PR-993-bd4cc52b logger.go:42: 20:37:40 | recreate/5-write-data | +++ VERSION=PR-993-bd4cc52b logger.go:42: 20:37:40 | recreate/5-write-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-993-bd4cc52b logger.go:42: 20:37:40 | recreate/5-write-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-993-bd4cc52b logger.go:42: 20:37:40 | recreate/5-write-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 20:37:40 | recreate/5-write-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 20:37:40 | recreate/5-write-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0 logger.go:42: 20:37:40 | recreate/5-write-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0 logger.go:42: 20:37:40 | recreate/5-write-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 20:37:40 | recreate/5-write-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 20:37:40 | recreate/5-write-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0 logger.go:42: 20:37:40 | recreate/5-write-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0 logger.go:42: 20:37:40 | recreate/5-write-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 20:37:40 | recreate/5-write-data | +++ 
IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 20:37:40 | recreate/5-write-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 20:37:40 | recreate/5-write-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 20:37:40 | recreate/5-write-data | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 20:37:40 | recreate/5-write-data | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 20:37:40 | recreate/5-write-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 20:37:40 | recreate/5-write-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 20:37:40 | recreate/5-write-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 20:37:40 | recreate/5-write-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 20:37:40 | recreate/5-write-data | +++ export CERT_MANAGER_VER=1.17.2 logger.go:42: 20:37:40 | recreate/5-write-data | +++ CERT_MANAGER_VER=1.17.2 logger.go:42: 20:37:40 | recreate/5-write-data | +++ export MINIO_VER=5.4.0 logger.go:42: 20:37:40 | recreate/5-write-data | +++ MINIO_VER=5.4.0 logger.go:42: 20:37:40 | recreate/5-write-data | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 20:37:40 | recreate/5-write-data | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 20:37:40 | recreate/5-write-data | +++ export VAULT_VER=0.16.1 logger.go:42: 20:37:40 | recreate/5-write-data | +++ VAULT_VER=0.16.1 logger.go:42: 20:37:40 | recreate/5-write-data | ++++ which gdate logger.go:42: 20:37:40 | recreate/5-write-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-993/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 20:37:40 | recreate/5-write-data | ++++ which date logger.go:42: 20:37:40 | recreate/5-write-data | +++ date=/usr/sbin/date logger.go:42: 20:37:40 | recreate/5-write-data | +++ oc get projects logger.go:42: 20:37:40 | recreate/5-write-data | +++ : logger.go:42: 20:37:40 | recreate/5-write-data | +++ kubectl get nodes logger.go:42: 20:37:40 | recreate/5-write-data | +++ grep '^minikube' logger.go:42: 20:37:41 | recreate/5-write-data | ++ oc get projects logger.go:42: 20:37:41 | recreate/5-write-data | +++ get_cluster_name logger.go:42: 20:37:41 | recreate/5-write-data | +++ kubectl -n kuttl-test-valid-filly get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 20:37:41 | recreate/5-write-data | ++ get_haproxy_svc recreate logger.go:42: 20:37:41 | recreate/5-write-data | ++ local cluster=recreate logger.go:42: 20:37:41 | recreate/5-write-data | ++ echo recreate-haproxy logger.go:42: 20:37:41 | recreate/5-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100501)' '-h recreate-haproxy -uroot -proot_password' logger.go:42: 20:37:41 | recreate/5-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100501)' logger.go:42: 20:37:41 | recreate/5-write-data | + local 'uri=-h recreate-haproxy -uroot -proot_password' logger.go:42: 20:37:41 | recreate/5-write-data | + local pod= logger.go:42: 20:37:41 | recreate/5-write-data | ++ get_client_pod logger.go:42: 20:37:41 | recreate/5-write-data | ++ kubectl -n kuttl-test-valid-filly get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 20:37:42 | recreate/5-write-data | + client_pod=mysql-client logger.go:42: 20:37:42 | recreate/5-write-data | + wait_pod mysql-client logger.go:42: 20:37:42 | 
recreate/5-write-data | + local pod=mysql-client logger.go:42: 20:37:42 | recreate/5-write-data | + set +o xtrace logger.go:42: 20:37:42 | recreate/5-write-data | mysql-clienttrue logger.go:42: 20:37:42 | recreate/5-write-data | + sed -e 's/mysql: //' logger.go:42: 20:37:42 | recreate/5-write-data | + kubectl -n kuttl-test-valid-filly exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100501)" | mysql -sN -h recreate-haproxy -uroot -proot_password' logger.go:42: 20:37:42 | recreate/5-write-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 20:37:43 | recreate/5-write-data | + : logger.go:42: 20:37:43 | recreate/5-write-data | + for i in 0 1 2 logger.go:42: 20:37:43 | recreate/5-write-data | +++ get_cluster_name logger.go:42: 20:37:43 | recreate/5-write-data | +++ kubectl -n kuttl-test-valid-filly get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 20:37:44 | recreate/5-write-data | ++ get_mysql_headless_fqdn recreate 0 logger.go:42: 20:37:44 | recreate/5-write-data | ++ local cluster=recreate logger.go:42: 20:37:44 | recreate/5-write-data | ++ local index=0 logger.go:42: 20:37:44 | recreate/5-write-data | ++ echo recreate-mysql-0.recreate-mysql logger.go:42: 20:37:44 | recreate/5-write-data | + host=recreate-mysql-0.recreate-mysql logger.go:42: 20:37:44 | recreate/5-write-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h recreate-mysql-0.recreate-mysql -uroot -proot_password' logger.go:42: 20:37:44 | recreate/5-write-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 20:37:44 | recreate/5-write-data | ++ local 'uri=-h recreate-mysql-0.recreate-mysql -uroot -proot_password' logger.go:42: 20:37:44 | recreate/5-write-data | ++ local pod= logger.go:42: 20:37:44 | recreate/5-write-data | +++ get_client_pod logger.go:42: 20:37:44 | recreate/5-write-data | +++ kubectl -n kuttl-test-valid-filly get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 20:37:44 | recreate/5-write-data | ++ client_pod=mysql-client logger.go:42: 20:37:44 | recreate/5-write-data | ++ wait_pod mysql-client logger.go:42: 20:37:44 | recreate/5-write-data | ++ local pod=mysql-client logger.go:42: 20:37:44 | recreate/5-write-data | ++ set +o xtrace logger.go:42: 20:37:44 | recreate/5-write-data | mysql-clienttrue logger.go:42: 20:37:44 | recreate/5-write-data | ++ kubectl -n kuttl-test-valid-filly exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h recreate-mysql-0.recreate-mysql -uroot -proot_password' logger.go:42: 20:37:44 | recreate/5-write-data | ++ sed -e 's/mysql: //' logger.go:42: 20:37:44 | recreate/5-write-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
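The replicas are read one by one rather than through HAProxy, so the loop needs per-pod addresses. The expansions above (echo recreate-mysql-0.recreate-mysql) give away the naming rule: StatefulSet pod name plus the cluster's headless service. The two helpers, as this log reveals them, in short form without namespace or cluster-domain suffixes:

    get_cluster_name() {
        # Name of the first (and here only) PerconaServerMySQL object in the namespace.
        kubectl -n "${NAMESPACE}" get ps -o 'jsonpath={.items[0].metadata.name}'
    }

    get_mysql_headless_fqdn() {
        local cluster="$1"
        local index="$2"
        # <pod>.<headless service>, e.g. recreate-mysql-0.recreate-mysql
        echo "${cluster}-mysql-${index}.${cluster}-mysql"
    }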
logger.go:42: 20:37:45 | recreate/5-write-data | + data='100500 logger.go:42: 20:37:45 | recreate/5-write-data | 100501' logger.go:42: 20:37:45 | recreate/5-write-data | + kubectl create configmap -n kuttl-test-valid-filly 06-write-data-0 '--from-literal=data=100500 logger.go:42: 20:37:45 | recreate/5-write-data | 100501' logger.go:42: 20:37:46 | recreate/5-write-data | configmap/06-write-data-0 created logger.go:42: 20:37:46 | recreate/5-write-data | + for i in 0 1 2 logger.go:42: 20:37:46 | recreate/5-write-data | +++ get_cluster_name logger.go:42: 20:37:46 | recreate/5-write-data | +++ kubectl -n kuttl-test-valid-filly get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 20:37:46 | recreate/5-write-data | ++ get_mysql_headless_fqdn recreate 1 logger.go:42: 20:37:46 | recreate/5-write-data | ++ local cluster=recreate logger.go:42: 20:37:46 | recreate/5-write-data | ++ local index=1 logger.go:42: 20:37:46 | recreate/5-write-data | ++ echo recreate-mysql-1.recreate-mysql logger.go:42: 20:37:46 | recreate/5-write-data | + host=recreate-mysql-1.recreate-mysql logger.go:42: 20:37:46 | recreate/5-write-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h recreate-mysql-1.recreate-mysql -uroot -proot_password' logger.go:42: 20:37:46 | recreate/5-write-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 20:37:46 | recreate/5-write-data | ++ local 'uri=-h recreate-mysql-1.recreate-mysql -uroot -proot_password' logger.go:42: 20:37:46 | recreate/5-write-data | ++ local pod= logger.go:42: 20:37:46 | recreate/5-write-data | +++ get_client_pod logger.go:42: 20:37:46 | recreate/5-write-data | +++ kubectl -n kuttl-test-valid-filly get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 20:37:47 | recreate/5-write-data | ++ client_pod=mysql-client logger.go:42: 20:37:47 | recreate/5-write-data | ++ wait_pod mysql-client logger.go:42: 20:37:47 | recreate/5-write-data | ++ local pod=mysql-client logger.go:42: 20:37:47 | recreate/5-write-data | ++ set +o xtrace logger.go:42: 20:37:47 | recreate/5-write-data | mysql-clienttrue logger.go:42: 20:37:47 | recreate/5-write-data | ++ kubectl -n kuttl-test-valid-filly exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h recreate-mysql-1.recreate-mysql -uroot -proot_password' logger.go:42: 20:37:47 | recreate/5-write-data | ++ sed -e 's/mysql: //' logger.go:42: 20:37:47 | recreate/5-write-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
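The captured value is itself multi-line (100500 plus 100501), and kubectl create configmap --from-literal stores it verbatim, which is why the create commands straddle several log lines here. A quick way to read a captured value back, purely for illustration; the kuttl assert files that actually consume these configmaps are not part of this log:

    # Same capture pattern the test uses for each replica index.
    kubectl -n "${NAMESPACE}" create configmap "06-write-data-${i}" --from-literal=data="${data}"

    # jsonpath prints the stored value raw, embedded newlines included.
    kubectl -n "${NAMESPACE}" get configmap 06-write-data-0 -o 'jsonpath={.data.data}'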
logger.go:42: 20:37:48 | recreate/5-write-data | + data='100500 logger.go:42: 20:37:48 | recreate/5-write-data | 100501' logger.go:42: 20:37:48 | recreate/5-write-data | + kubectl create configmap -n kuttl-test-valid-filly 06-write-data-1 '--from-literal=data=100500 logger.go:42: 20:37:48 | recreate/5-write-data | 100501' logger.go:42: 20:37:48 | recreate/5-write-data | configmap/06-write-data-1 created logger.go:42: 20:37:48 | recreate/5-write-data | + for i in 0 1 2 logger.go:42: 20:37:48 | recreate/5-write-data | +++ get_cluster_name logger.go:42: 20:37:48 | recreate/5-write-data | +++ kubectl -n kuttl-test-valid-filly get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 20:37:49 | recreate/5-write-data | ++ get_mysql_headless_fqdn recreate 2 logger.go:42: 20:37:49 | recreate/5-write-data | ++ local cluster=recreate logger.go:42: 20:37:49 | recreate/5-write-data | ++ local index=2 logger.go:42: 20:37:49 | recreate/5-write-data | ++ echo recreate-mysql-2.recreate-mysql logger.go:42: 20:37:49 | recreate/5-write-data | + host=recreate-mysql-2.recreate-mysql logger.go:42: 20:37:49 | recreate/5-write-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h recreate-mysql-2.recreate-mysql -uroot -proot_password' logger.go:42: 20:37:49 | recreate/5-write-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 20:37:49 | recreate/5-write-data | ++ local 'uri=-h recreate-mysql-2.recreate-mysql -uroot -proot_password' logger.go:42: 20:37:49 | recreate/5-write-data | ++ local pod= logger.go:42: 20:37:49 | recreate/5-write-data | +++ get_client_pod logger.go:42: 20:37:49 | recreate/5-write-data | +++ kubectl -n kuttl-test-valid-filly get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 20:37:49 | recreate/5-write-data | ++ client_pod=mysql-client logger.go:42: 20:37:49 | recreate/5-write-data | ++ wait_pod mysql-client logger.go:42: 20:37:49 | recreate/5-write-data | ++ local pod=mysql-client logger.go:42: 20:37:49 | recreate/5-write-data | ++ set +o xtrace logger.go:42: 20:37:50 | recreate/5-write-data | mysql-clienttrue logger.go:42: 20:37:50 | recreate/5-write-data | ++ kubectl -n kuttl-test-valid-filly exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h recreate-mysql-2.recreate-mysql -uroot -proot_password' logger.go:42: 20:37:50 | recreate/5-write-data | ++ sed -e 's/mysql: //' logger.go:42: 20:37:50 | recreate/5-write-data | ++ grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 20:37:51 | recreate/5-write-data | + data='100500 logger.go:42: 20:37:51 | recreate/5-write-data | 100501' logger.go:42: 20:37:51 | recreate/5-write-data | + kubectl create configmap -n kuttl-test-valid-filly 06-write-data-2 '--from-literal=data=100500 logger.go:42: 20:37:51 | recreate/5-write-data | 100501' logger.go:42: 20:37:51 | recreate/5-write-data | configmap/06-write-data-2 created logger.go:42: 20:37:52 | recreate/5-write-data | test step completed 5-write-data logger.go:42: 20:37:52 | recreate/7-delete-cluster | starting test step 7-delete-cluster logger.go:42: 20:37:52 | recreate/7-delete-cluster | running command: [sh -c set -o errexit set -o xtrace source ../../functions kubectl delete ps -n ${NAMESPACE} recreate] logger.go:42: 20:37:52 | recreate/7-delete-cluster | + source ../../functions logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ realpath ../../.. 
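7-delete-cluster, announced just above, removes only the custom resource; the operator then tears down the pods, StatefulSets and Services it owns. The recreate scenario only works because the data volumes outlive that delete, which step 9 confirms when the old rows reappear. A hedged way to watch that directly, assuming the operator labels PVCs with app.kubernetes.io/instance (a common convention this log does not confirm):

    # Drop the CR and let the owned workloads go away.
    kubectl -n "${NAMESPACE}" delete ps recreate

    # The datadir PVCs should stay Bound, ready for the recreated cluster.
    kubectl -n "${NAMESPACE}" get pvc -l app.kubernetes.io/instance=recreate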
logger.go:42: 20:37:52 | recreate/7-delete-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993 logger.go:42: 20:37:52 | recreate/7-delete-cluster | ++++ pwd logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/tests/recreate logger.go:42: 20:37:52 | recreate/7-delete-cluster | ++ test_name=recreate logger.go:42: 20:37:52 | recreate/7-delete-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/vars.sh logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993 logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993 logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/recreate logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/recreate logger.go:42: 20:37:52 | recreate/7-delete-cluster | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ export GIT_BRANCH=PR-993 logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ GIT_BRANCH=PR-993 logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ export VERSION=PR-993-bd4cc52b logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ VERSION=PR-993-bd4cc52b logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-993-bd4cc52b logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-993-bd4cc52b logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0 logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0 logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0 logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0 logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ export 
IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ export CERT_MANAGER_VER=1.17.2 logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ CERT_MANAGER_VER=1.17.2 logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ export MINIO_VER=5.4.0 logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ MINIO_VER=5.4.0 logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ export VAULT_VER=0.16.1 logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ VAULT_VER=0.16.1 logger.go:42: 20:37:52 | recreate/7-delete-cluster | ++++ which gdate logger.go:42: 20:37:52 | recreate/7-delete-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-993/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 20:37:52 | recreate/7-delete-cluster | ++++ which date logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ date=/usr/sbin/date logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ oc get projects logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ : logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ kubectl get nodes logger.go:42: 20:37:52 | recreate/7-delete-cluster | +++ grep '^minikube' logger.go:42: 20:37:53 | recreate/7-delete-cluster | ++ oc get projects logger.go:42: 20:37:53 | recreate/7-delete-cluster | + kubectl delete ps -n kuttl-test-valid-filly recreate logger.go:42: 20:37:54 | recreate/7-delete-cluster | perconaservermysql.ps.percona.com "recreate" deleted logger.go:42: 20:38:12 | recreate/7-delete-cluster | test step completed 7-delete-cluster logger.go:42: 20:38:12 | recreate/8- | starting test step 8- logger.go:42: 20:38:13 | recreate/8- | test step completed 8- logger.go:42: 20:38:13 | recreate/9-recreate-cluster | starting test step 9-recreate-cluster logger.go:42: 20:38:13 | recreate/9-recreate-cluster | running command: [sh -c set -o errexit set -o xtrace source ../../functions get_cr \ | yq eval '.spec.pause=false' - \ | yq eval '.spec.mysql.clusterType="async"' - \ | yq eval '.spec.mysql.size=3' - \ | yq eval '.spec.proxy.haproxy.enabled=true' - \ | yq eval '.spec.proxy.haproxy.size=3' - \ | yq eval '.spec.orchestrator.enabled=true' - \ | yq eval '.spec.orchestrator.size=3' - \ | kubectl -n "${NAMESPACE}" apply -f -] logger.go:42: 20:38:13 | 
recreate/9-recreate-cluster | + source ../../functions logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ realpath ../../.. logger.go:42: 20:38:13 | recreate/9-recreate-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | ++++ pwd logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/tests/recreate logger.go:42: 20:38:13 | recreate/9-recreate-cluster | ++ test_name=recreate logger.go:42: 20:38:13 | recreate/9-recreate-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/vars.sh logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/recreate logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/recreate logger.go:42: 20:38:13 | recreate/9-recreate-cluster | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ export GIT_BRANCH=PR-993 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ GIT_BRANCH=PR-993 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ export VERSION=PR-993-bd4cc52b logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ VERSION=PR-993-bd4cc52b logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-993-bd4cc52b logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-993-bd4cc52b logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0 logger.go:42: 20:38:13 | 
recreate/9-recreate-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ export CERT_MANAGER_VER=1.17.2 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ CERT_MANAGER_VER=1.17.2 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ export MINIO_VER=5.4.0 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ MINIO_VER=5.4.0 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ export VAULT_VER=0.16.1 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ VAULT_VER=0.16.1 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | ++++ which gdate logger.go:42: 20:38:13 | recreate/9-recreate-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-993/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 20:38:13 | recreate/9-recreate-cluster | ++++ which date logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ date=/usr/sbin/date logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ oc get projects logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ : logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ kubectl get nodes logger.go:42: 20:38:13 | recreate/9-recreate-cluster | +++ grep '^minikube' logger.go:42: 20:38:13 | recreate/9-recreate-cluster | ++ oc get projects logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + get_cr logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + local name_suffix= logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval .spec.pause=false - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval .spec.mysql.size=3 - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval .spec.proxy.haproxy.enabled=true - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval '.spec.secretsName="test-secrets"' - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval '.spec.sslSecretName="test-ssl"' - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | ++ printf '.spec.initContainer.image="%s"' 
perconalab/percona-server-mysql-operator:PR-993-bd4cc52b logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval '.spec.initContainer.image="perconalab/percona-server-mysql-operator:PR-993-bd4cc52b"' - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval .spec.orchestrator.enabled=true - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | ++ printf '.metadata.name="%s"' recreate logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval .spec.mysql.gracePeriod=30 - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval .spec.orchestrator.size=3 - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval .spec.proxy.haproxy.size=3 - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval '.metadata.name="recreate"' /mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy/cr.yaml logger.go:42: 20:38:13 | recreate/9-recreate-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql8.0"' - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval .spec.orchestrator.enabled=true - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 20:38:13 | recreate/9-recreate-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + kubectl -n kuttl-test-valid-filly apply -f - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router8.0 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:3-dev-latest logger.go:42: 20:38:13 | recreate/9-recreate-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup8.0 logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router8.0"' - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup8.0"' - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + '[' -n '' ']' logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval - logger.go:42: 20:38:13 | recreate/9-recreate-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:3-dev-latest"' - logger.go:42: 20:38:15 | recreate/9-recreate-cluster | perconaservermysql.ps.percona.com/recreate created logger.go:42: 
20:41:40 | recreate/9-recreate-cluster | test step completed 9-recreate-cluster logger.go:42: 20:41:40 | recreate/10-write-data | starting test step 10-write-data logger.go:42: 20:41:40 | recreate/10-write-data | running command: [sh -c set -o errexit set -o xtrace source ../../functions run_mysql \ "INSERT myDB.myTable (id) VALUES (100502)" \ "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password" for i in 0 1 2; do host=$(get_mysql_headless_fqdn $(get_cluster_name) $i) data=$(run_mysql "SELECT * FROM myDB.myTable" "-h ${host} -uroot -proot_password") kubectl create configmap -n "${NAMESPACE}" 11-write-data-${i} --from-literal=data="${data}" done] logger.go:42: 20:41:40 | recreate/10-write-data | + source ../../functions logger.go:42: 20:41:40 | recreate/10-write-data | +++ realpath ../../.. logger.go:42: 20:41:40 | recreate/10-write-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993 logger.go:42: 20:41:40 | recreate/10-write-data | ++++ pwd logger.go:42: 20:41:40 | recreate/10-write-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/tests/recreate logger.go:42: 20:41:40 | recreate/10-write-data | ++ test_name=recreate logger.go:42: 20:41:40 | recreate/10-write-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/vars.sh logger.go:42: 20:41:40 | recreate/10-write-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993 logger.go:42: 20:41:40 | recreate/10-write-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993 logger.go:42: 20:41:40 | recreate/10-write-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy logger.go:42: 20:41:40 | recreate/10-write-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy logger.go:42: 20:41:40 | recreate/10-write-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests logger.go:42: 20:41:40 | recreate/10-write-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests logger.go:42: 20:41:40 | recreate/10-write-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf logger.go:42: 20:41:40 | recreate/10-write-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf logger.go:42: 20:41:40 | recreate/10-write-data | +++ export TEMP_DIR=/tmp/kuttl/ps/recreate logger.go:42: 20:41:40 | recreate/10-write-data | +++ TEMP_DIR=/tmp/kuttl/ps/recreate logger.go:42: 20:41:40 | recreate/10-write-data | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 20:41:40 | recreate/10-write-data | +++ export GIT_BRANCH=PR-993 logger.go:42: 20:41:40 | recreate/10-write-data | +++ GIT_BRANCH=PR-993 logger.go:42: 20:41:40 | recreate/10-write-data | +++ export VERSION=PR-993-bd4cc52b logger.go:42: 20:41:40 | recreate/10-write-data | +++ VERSION=PR-993-bd4cc52b logger.go:42: 20:41:40 | recreate/10-write-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-993-bd4cc52b logger.go:42: 20:41:40 | recreate/10-write-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-993-bd4cc52b logger.go:42: 20:41:40 | recreate/10-write-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 20:41:40 | recreate/10-write-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0 logger.go:42: 20:41:40 | recreate/10-write-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0 logger.go:42: 20:41:40 | 
recreate/10-write-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0 logger.go:42: 20:41:40 | recreate/10-write-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 20:41:40 | recreate/10-write-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 20:41:40 | recreate/10-write-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0 logger.go:42: 20:41:40 | recreate/10-write-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0 logger.go:42: 20:41:40 | recreate/10-write-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 20:41:40 | recreate/10-write-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 20:41:40 | recreate/10-write-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 20:41:40 | recreate/10-write-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 20:41:40 | recreate/10-write-data | +++ export PMM_SERVER_VERSION=1.4.3 logger.go:42: 20:41:40 | recreate/10-write-data | +++ PMM_SERVER_VERSION=1.4.3 logger.go:42: 20:41:40 | recreate/10-write-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 20:41:40 | recreate/10-write-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest logger.go:42: 20:41:40 | recreate/10-write-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 20:41:40 | recreate/10-write-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest logger.go:42: 20:41:40 | recreate/10-write-data | +++ export CERT_MANAGER_VER=1.17.2 logger.go:42: 20:41:40 | recreate/10-write-data | +++ CERT_MANAGER_VER=1.17.2 logger.go:42: 20:41:40 | recreate/10-write-data | +++ export MINIO_VER=5.4.0 logger.go:42: 20:41:40 | recreate/10-write-data | +++ MINIO_VER=5.4.0 logger.go:42: 20:41:40 | recreate/10-write-data | +++ export CHAOS_MESH_VER=2.7.2 logger.go:42: 20:41:40 | recreate/10-write-data | +++ CHAOS_MESH_VER=2.7.2 logger.go:42: 20:41:40 | recreate/10-write-data | +++ export VAULT_VER=0.16.1 logger.go:42: 20:41:40 | recreate/10-write-data | +++ VAULT_VER=0.16.1 logger.go:42: 20:41:40 | recreate/10-write-data | ++++ which gdate logger.go:42: 20:41:40 | recreate/10-write-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-993/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin) logger.go:42: 20:41:40 | recreate/10-write-data | ++++ which date logger.go:42: 20:41:40 | recreate/10-write-data | +++ date=/usr/sbin/date logger.go:42: 20:41:40 | recreate/10-write-data | +++ oc get projects logger.go:42: 20:41:40 | recreate/10-write-data | +++ : logger.go:42: 20:41:40 | recreate/10-write-data | +++ kubectl get nodes logger.go:42: 20:41:40 | recreate/10-write-data | +++ grep '^minikube' logger.go:42: 20:41:40 | recreate/10-write-data | ++ oc get projects logger.go:42: 20:41:40 | recreate/10-write-data | +++ get_cluster_name logger.go:42: 20:41:40 | recreate/10-write-data | +++ kubectl -n kuttl-test-valid-filly get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 20:41:41 | recreate/10-write-data | ++ get_haproxy_svc recreate logger.go:42: 20:41:41 | recreate/10-write-data | ++ local cluster=recreate logger.go:42: 20:41:41 | recreate/10-write-data | ++ echo recreate-haproxy 
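get_haproxy_svc reduces to "<cluster>-haproxy", as the echo just above shows. Writes in both write-data steps go through that Service so they always land on the current primary; the per-pod SELECTs that follow then double as an asynchronous-replication check. The write path, restated with the helpers sketched earlier (root credentials as used throughout this log):

    get_haproxy_svc() {
        local cluster="$1"
        echo "${cluster}-haproxy"   # Service in front of the current primary
    }

    # Insert via the proxy; each replica should return the new row shortly after.
    run_mysql "INSERT myDB.myTable (id) VALUES (100502)" \
        "-h $(get_haproxy_svc "$(get_cluster_name)") -uroot -proot_password"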
logger.go:42: 20:41:41 | recreate/10-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100502)' '-h recreate-haproxy -uroot -proot_password' logger.go:42: 20:41:41 | recreate/10-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100502)' logger.go:42: 20:41:41 | recreate/10-write-data | + local 'uri=-h recreate-haproxy -uroot -proot_password' logger.go:42: 20:41:41 | recreate/10-write-data | + local pod= logger.go:42: 20:41:41 | recreate/10-write-data | ++ get_client_pod logger.go:42: 20:41:41 | recreate/10-write-data | ++ kubectl -n kuttl-test-valid-filly get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 20:41:41 | recreate/10-write-data | + client_pod=mysql-client logger.go:42: 20:41:41 | recreate/10-write-data | + wait_pod mysql-client logger.go:42: 20:41:41 | recreate/10-write-data | + local pod=mysql-client logger.go:42: 20:41:41 | recreate/10-write-data | + set +o xtrace logger.go:42: 20:41:42 | recreate/10-write-data | mysql-clienttrue logger.go:42: 20:41:42 | recreate/10-write-data | + kubectl -n kuttl-test-valid-filly exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100502)" | mysql -sN -h recreate-haproxy -uroot -proot_password' logger.go:42: 20:41:42 | recreate/10-write-data | + sed -e 's/mysql: //' logger.go:42: 20:41:42 | recreate/10-write-data | + grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 20:41:43 | recreate/10-write-data | + : logger.go:42: 20:41:43 | recreate/10-write-data | + for i in 0 1 2 logger.go:42: 20:41:43 | recreate/10-write-data | +++ get_cluster_name logger.go:42: 20:41:43 | recreate/10-write-data | +++ kubectl -n kuttl-test-valid-filly get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 20:41:43 | recreate/10-write-data | ++ get_mysql_headless_fqdn recreate 0 logger.go:42: 20:41:43 | recreate/10-write-data | ++ local cluster=recreate logger.go:42: 20:41:43 | recreate/10-write-data | ++ local index=0 logger.go:42: 20:41:43 | recreate/10-write-data | ++ echo recreate-mysql-0.recreate-mysql logger.go:42: 20:41:43 | recreate/10-write-data | + host=recreate-mysql-0.recreate-mysql logger.go:42: 20:41:43 | recreate/10-write-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h recreate-mysql-0.recreate-mysql -uroot -proot_password' logger.go:42: 20:41:43 | recreate/10-write-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 20:41:43 | recreate/10-write-data | ++ local 'uri=-h recreate-mysql-0.recreate-mysql -uroot -proot_password' logger.go:42: 20:41:43 | recreate/10-write-data | ++ local pod= logger.go:42: 20:41:43 | recreate/10-write-data | +++ get_client_pod logger.go:42: 20:41:43 | recreate/10-write-data | +++ kubectl -n kuttl-test-valid-filly get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 20:41:44 | recreate/10-write-data | ++ client_pod=mysql-client logger.go:42: 20:41:44 | recreate/10-write-data | ++ wait_pod mysql-client logger.go:42: 20:41:44 | recreate/10-write-data | ++ local pod=mysql-client logger.go:42: 20:41:44 | recreate/10-write-data | ++ set +o xtrace logger.go:42: 20:41:44 | recreate/10-write-data | mysql-clienttrue logger.go:42: 20:41:44 | recreate/10-write-data | ++ kubectl -n kuttl-test-valid-filly exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h recreate-mysql-0.recreate-mysql -uroot -proot_password' logger.go:42: 20:41:44 | recreate/10-write-data | ++ sed -e 's/mysql: //' logger.go:42: 20:41:44 
| recreate/10-write-data | ++ grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 20:41:45 | recreate/10-write-data | + data='100500 logger.go:42: 20:41:45 | recreate/10-write-data | 100501 logger.go:42: 20:41:45 | recreate/10-write-data | 100502' logger.go:42: 20:41:45 | recreate/10-write-data | + kubectl create configmap -n kuttl-test-valid-filly 11-write-data-0 '--from-literal=data=100500 logger.go:42: 20:41:45 | recreate/10-write-data | 100501 logger.go:42: 20:41:45 | recreate/10-write-data | 100502' logger.go:42: 20:41:45 | recreate/10-write-data | configmap/11-write-data-0 created logger.go:42: 20:41:45 | recreate/10-write-data | + for i in 0 1 2 logger.go:42: 20:41:45 | recreate/10-write-data | +++ get_cluster_name logger.go:42: 20:41:45 | recreate/10-write-data | +++ kubectl -n kuttl-test-valid-filly get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 20:41:46 | recreate/10-write-data | ++ get_mysql_headless_fqdn recreate 1 logger.go:42: 20:41:46 | recreate/10-write-data | ++ local cluster=recreate logger.go:42: 20:41:46 | recreate/10-write-data | ++ local index=1 logger.go:42: 20:41:46 | recreate/10-write-data | ++ echo recreate-mysql-1.recreate-mysql logger.go:42: 20:41:46 | recreate/10-write-data | + host=recreate-mysql-1.recreate-mysql logger.go:42: 20:41:46 | recreate/10-write-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h recreate-mysql-1.recreate-mysql -uroot -proot_password' logger.go:42: 20:41:46 | recreate/10-write-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 20:41:46 | recreate/10-write-data | ++ local 'uri=-h recreate-mysql-1.recreate-mysql -uroot -proot_password' logger.go:42: 20:41:46 | recreate/10-write-data | ++ local pod= logger.go:42: 20:41:46 | recreate/10-write-data | +++ get_client_pod logger.go:42: 20:41:46 | recreate/10-write-data | +++ kubectl -n kuttl-test-valid-filly get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 20:41:46 | recreate/10-write-data | ++ client_pod=mysql-client logger.go:42: 20:41:46 | recreate/10-write-data | ++ wait_pod mysql-client logger.go:42: 20:41:46 | recreate/10-write-data | ++ local pod=mysql-client logger.go:42: 20:41:46 | recreate/10-write-data | ++ set +o xtrace logger.go:42: 20:41:47 | recreate/10-write-data | mysql-clienttrue logger.go:42: 20:41:47 | recreate/10-write-data | ++ kubectl -n kuttl-test-valid-filly exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h recreate-mysql-1.recreate-mysql -uroot -proot_password' logger.go:42: 20:41:47 | recreate/10-write-data | ++ sed -e 's/mysql: //' logger.go:42: 20:41:47 | recreate/10-write-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 20:41:48 | recreate/10-write-data | + data='100500 logger.go:42: 20:41:48 | recreate/10-write-data | 100501 logger.go:42: 20:41:48 | recreate/10-write-data | 100502' logger.go:42: 20:41:48 | recreate/10-write-data | + kubectl create configmap -n kuttl-test-valid-filly 11-write-data-1 '--from-literal=data=100500 logger.go:42: 20:41:48 | recreate/10-write-data | 100501 logger.go:42: 20:41:48 | recreate/10-write-data | 100502' logger.go:42: 20:41:48 | recreate/10-write-data | configmap/11-write-data-1 created logger.go:42: 20:41:48 | recreate/10-write-data | + for i in 0 1 2 logger.go:42: 20:41:48 | recreate/10-write-data | +++ get_cluster_name logger.go:42: 20:41:48 | recreate/10-write-data | +++ kubectl -n kuttl-test-valid-filly get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 20:41:48 | recreate/10-write-data | ++ get_mysql_headless_fqdn recreate 2 logger.go:42: 20:41:48 | recreate/10-write-data | ++ local cluster=recreate logger.go:42: 20:41:48 | recreate/10-write-data | ++ local index=2 logger.go:42: 20:41:48 | recreate/10-write-data | ++ echo recreate-mysql-2.recreate-mysql logger.go:42: 20:41:48 | recreate/10-write-data | + host=recreate-mysql-2.recreate-mysql logger.go:42: 20:41:48 | recreate/10-write-data | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h recreate-mysql-2.recreate-mysql -uroot -proot_password' logger.go:42: 20:41:48 | recreate/10-write-data | ++ local 'command=SELECT * FROM myDB.myTable' logger.go:42: 20:41:48 | recreate/10-write-data | ++ local 'uri=-h recreate-mysql-2.recreate-mysql -uroot -proot_password' logger.go:42: 20:41:48 | recreate/10-write-data | ++ local pod= logger.go:42: 20:41:48 | recreate/10-write-data | +++ get_client_pod logger.go:42: 20:41:48 | recreate/10-write-data | +++ kubectl -n kuttl-test-valid-filly get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 20:41:49 | recreate/10-write-data | ++ client_pod=mysql-client logger.go:42: 20:41:49 | recreate/10-write-data | ++ wait_pod mysql-client logger.go:42: 20:41:49 | recreate/10-write-data | ++ local pod=mysql-client logger.go:42: 20:41:49 | recreate/10-write-data | ++ set +o xtrace logger.go:42: 20:41:49 | recreate/10-write-data | mysql-clienttrue logger.go:42: 20:41:49 | recreate/10-write-data | ++ kubectl -n kuttl-test-valid-filly exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h recreate-mysql-2.recreate-mysql -uroot -proot_password' logger.go:42: 20:41:49 | recreate/10-write-data | ++ sed -e 's/mysql: //' logger.go:42: 20:41:49 | recreate/10-write-data | ++ grep -v 'Using a password on the command line interface can be insecure.' 
logger.go:42: 20:41:50 | recreate/10-write-data | + data='100500 logger.go:42: 20:41:50 | recreate/10-write-data | 100501 logger.go:42: 20:41:50 | recreate/10-write-data | 100502' logger.go:42: 20:41:50 | recreate/10-write-data | + kubectl create configmap -n kuttl-test-valid-filly 11-write-data-2 '--from-literal=data=100500 logger.go:42: 20:41:50 | recreate/10-write-data | 100501 logger.go:42: 20:41:50 | recreate/10-write-data | 100502' logger.go:42: 20:41:51 | recreate/10-write-data | configmap/11-write-data-2 created logger.go:42: 20:41:52 | recreate/10-write-data | test step completed 10-write-data logger.go:42: 20:41:52 | recreate/98-drop-finalizer | starting test step 98-drop-finalizer logger.go:42: 20:41:53 | recreate/98-drop-finalizer | PerconaServerMySQL:kuttl-test-valid-filly/recreate updated logger.go:42: 20:41:53 | recreate/98-drop-finalizer | test step completed 98-drop-finalizer logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | starting test step 99-remove-cluster-gracefully logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_operator] logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | + source ../../functions logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ realpath ../../.. logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993 logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | ++++ pwd logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/tests/recreate logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | ++ test_name=recreate logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/vars.sh logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993 logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-993 logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/deploy logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-993/e2e-tests/conf logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ export TEMP_DIR=/tmp/kuttl/ps/recreate logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ TEMP_DIR=/tmp/kuttl/ps/recreate logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ export GIT_BRANCH=PR-993 logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ GIT_BRANCH=PR-993 
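With the 11-write-data-* configmaps created, the scenario is proven end to end: every replica returns the pre-delete rows 100500 and 100501 plus the new 100502. The kuttl step presumably asserts this declaratively through its assert files, which are not shown in this log; an equivalent ad-hoc check would be:

    # Rows captured before the delete must be a prefix of the rows after recreate.
    for i in 0 1 2; do
        before=$(kubectl -n "${NAMESPACE}" get configmap "06-write-data-${i}" -o 'jsonpath={.data.data}')
        after=$(kubectl -n "${NAMESPACE}" get configmap "11-write-data-${i}" -o 'jsonpath={.data.data}')
        case "${after}" in
            "${before}"*) echo "replica ${i}: data survived the recreate" ;;
            *)            echo "replica ${i}: data lost" >&2 ;;
        esac
    done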
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ export VERSION=PR-993-bd4cc52b
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ VERSION=PR-993-bd4cc52b
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-993-bd4cc52b
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-993-bd4cc52b
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql8.0
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup8.0
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router8.0
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ export PMM_SERVER_VERSION=1.4.3
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ PMM_SERVER_VERSION=1.4.3
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ export CERT_MANAGER_VER=1.17.2
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ CERT_MANAGER_VER=1.17.2
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ export MINIO_VER=5.4.0
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ MINIO_VER=5.4.0
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ export CHAOS_MESH_VER=2.7.2
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ CHAOS_MESH_VER=2.7.2
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ export VAULT_VER=0.16.1
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ VAULT_VER=0.16.1
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | ++++ which gdate
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-993/bin/:/root/.krew/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/var/lib/snapd/snap/bin)
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | ++++ which date
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ date=/usr/sbin/date
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ oc get projects
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ :
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ kubectl get nodes
logger.go:42: 20:41:53 | recreate/99-remove-cluster-gracefully | +++ grep '^minikube'
logger.go:42: 20:41:54 | recreate/99-remove-cluster-gracefully | ++ oc get projects
logger.go:42: 20:41:54 | recreate/99-remove-cluster-gracefully | + destroy_operator
logger.go:42: 20:41:54 | recreate/99-remove-cluster-gracefully | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
logger.go:42: 20:41:54 | recreate/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 20:41:54 | recreate/99-remove-cluster-gracefully | deployment.apps "percona-server-mysql-operator" force deleted
logger.go:42: 20:41:54 | recreate/99-remove-cluster-gracefully | + [[ -n ps-operator ]]
logger.go:42: 20:41:54 | recreate/99-remove-cluster-gracefully | + kubectl delete namespace ps-operator --force --grace-period=0
logger.go:42: 20:41:55 | recreate/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
logger.go:42: 20:41:55 | recreate/99-remove-cluster-gracefully | namespace "ps-operator" force deleted
logger.go:42: 20:42:02 | recreate/99-remove-cluster-gracefully | test step completed 99-remove-cluster-gracefully
logger.go:42: 20:42:02 | recreate | recreate events from ns kuttl-test-valid-filly:
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:14 +0000 UTC Normal Pod mysql-client Binding Scheduled Successfully assigned kuttl-test-valid-filly/mysql-client to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-hpjc default-scheduler
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:15 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Container image "perconalab/percona-server-mysql-operator:main-psmysql8.0" already present on machine kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:15 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container: mysql-client kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:15 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:26 +0000 UTC Normal PersistentVolumeClaim datadir-recreate-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:26 +0000 UTC Normal PersistentVolumeClaim datadir-recreate-mysql-0 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:26 +0000 UTC Normal PersistentVolumeClaim datadir-recreate-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-valid-filly/datadir-recreate-mysql-0" pd.csi.storage.gke.io_gke-f3dffe12ee5b4baeac9e-2fa9-90d7-vm_58e7fc8f-3c37-469c-9d46-f567ca1133f0
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:26 +0000 UTC Normal StatefulSet.apps recreate-mysql SuccessfulCreate create Claim datadir-recreate-mysql-0 Pod recreate-mysql-0 in StatefulSet recreate-mysql success statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:26 +0000 UTC Normal StatefulSet.apps recreate-mysql SuccessfulCreate create Pod recreate-mysql-0 in StatefulSet recreate-mysql successful statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:27 +0000 UTC Normal Pod recreate-orc-0 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-orc-0 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-xh3r default-scheduler
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:27 +0000 UTC Normal StatefulSet.apps recreate-orc SuccessfulCreate create Pod recreate-orc-0 in StatefulSet recreate-orc successful statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:27 +0000 UTC Normal PodDisruptionBudget.policy recreate-orchestrator NoPods No matching pods found controllermanager
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:27 +0000 UTC Warning PerconaServerMySQL.ps.percona.com recreate ClusterStateChanged Error -> Initializing ps-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:28 +0000 UTC Normal Pod recreate-orc-0.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:28 +0000 UTC Normal Pod recreate-orc-0.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 213ms (213ms including waiting). Image size: 109002393 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:28 +0000 UTC Normal Pod recreate-orc-0.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:28 +0000 UTC Normal Pod recreate-orc-0.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:29 +0000 UTC Normal Pod recreate-orc-0.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:30 +0000 UTC Normal PersistentVolumeClaim datadir-recreate-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-fd988da2-d5bb-4035-8a7d-9fcb74c86699 pd.csi.storage.gke.io_gke-f3dffe12ee5b4baeac9e-2fa9-90d7-vm_58e7fc8f-3c37-469c-9d46-f567ca1133f0
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:30 +0000 UTC Normal Pod recreate-mysql-0 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-mysql-0 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-hpjc default-scheduler
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:30 +0000 UTC Normal Pod recreate-orc-0.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 115ms (115ms including waiting). Image size: 72469809 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:30 +0000 UTC Normal Pod recreate-orc-0.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:30 +0000 UTC Normal Pod recreate-orc-0.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:30 +0000 UTC Normal Pod recreate-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:30 +0000 UTC Normal Pod recreate-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 106ms (106ms including waiting). Image size: 72469809 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:30 +0000 UTC Normal Pod recreate-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:30 +0000 UTC Normal Pod recreate-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:38 +0000 UTC Normal Pod recreate-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-fd988da2-d5bb-4035-8a7d-9fcb74c86699" attachdetach-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:39 +0000 UTC Normal Pod recreate-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:39 +0000 UTC Normal Pod recreate-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 115ms (115ms including waiting). Image size: 109002393 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:39 +0000 UTC Normal Pod recreate-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:39 +0000 UTC Normal Pod recreate-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:41 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:41 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 115ms (115ms including waiting). Image size: 437180759 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:41 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:41 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:41 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:41 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 117ms (117ms including waiting). Image size: 428922326 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:41 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:41 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:41 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:41 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 121ms (121ms including waiting). Image size: 132941919 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:41 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:30:41 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:03 +0000 UTC Normal Pod recreate-orc-1 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-orc-1 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-q2rd default-scheduler
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:03 +0000 UTC Normal StatefulSet.apps recreate-orc SuccessfulCreate create Pod recreate-orc-1 in StatefulSet recreate-orc successful statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:04 +0000 UTC Warning Pod recreate-orc-1 FailedMount MountVolume.SetUp failed for volume "users" : failed to sync secret cache: timed out waiting for the condition kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:04 +0000 UTC Warning Pod recreate-orc-1 FailedMount MountVolume.SetUp failed for volume "custom" : failed to sync configmap cache: timed out waiting for the condition kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:05 +0000 UTC Normal Pod recreate-orc-1.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:05 +0000 UTC Normal Pod recreate-orc-1.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 185ms (185ms including waiting). Image size: 109002393 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:05 +0000 UTC Normal Pod recreate-orc-1.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:05 +0000 UTC Normal Pod recreate-orc-1.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:08 +0000 UTC Normal Pod recreate-orc-1.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:08 +0000 UTC Normal Pod recreate-orc-1.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 214ms (214ms including waiting). Image size: 72469809 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:08 +0000 UTC Normal Pod recreate-orc-1.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:08 +0000 UTC Normal Pod recreate-orc-1.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:08 +0000 UTC Normal Pod recreate-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:08 +0000 UTC Normal Pod recreate-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 193ms (193ms including waiting). Image size: 72469809 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:08 +0000 UTC Normal Pod recreate-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:08 +0000 UTC Normal Pod recreate-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:14 +0000 UTC Normal PersistentVolumeClaim datadir-recreate-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:14 +0000 UTC Normal PersistentVolumeClaim datadir-recreate-mysql-1 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:14 +0000 UTC Normal PersistentVolumeClaim datadir-recreate-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-valid-filly/datadir-recreate-mysql-1" pd.csi.storage.gke.io_gke-f3dffe12ee5b4baeac9e-2fa9-90d7-vm_58e7fc8f-3c37-469c-9d46-f567ca1133f0
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:14 +0000 UTC Normal StatefulSet.apps recreate-mysql SuccessfulCreate create Claim datadir-recreate-mysql-1 Pod recreate-mysql-1 in StatefulSet recreate-mysql success statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:14 +0000 UTC Normal StatefulSet.apps recreate-mysql SuccessfulCreate create Pod recreate-mysql-1 in StatefulSet recreate-mysql successful statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:15 +0000 UTC Normal Pod recreate-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-haproxy-0 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-hpjc default-scheduler
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:15 +0000 UTC Normal StatefulSet.apps recreate-haproxy SuccessfulCreate create Pod recreate-haproxy-0 in StatefulSet recreate-haproxy successful statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:16 +0000 UTC Normal Pod recreate-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:16 +0000 UTC Normal Pod recreate-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 117ms (117ms including waiting). Image size: 109002393 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:16 +0000 UTC Normal Pod recreate-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:16 +0000 UTC Normal Pod recreate-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:18 +0000 UTC Normal PersistentVolumeClaim datadir-recreate-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-2d89d63d-47d5-4b44-8619-0bc2d0710e69 pd.csi.storage.gke.io_gke-f3dffe12ee5b4baeac9e-2fa9-90d7-vm_58e7fc8f-3c37-469c-9d46-f567ca1133f0
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:18 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:18 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 143ms (143ms including waiting). Image size: 105401394 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:18 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:18 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:18 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:18 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 221ms (221ms including waiting). Image size: 105401394 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:18 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:18 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:18 +0000 UTC Normal Pod recreate-mysql-1 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-mysql-1 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-xh3r default-scheduler
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:19 +0000 UTC Warning Pod recreate-haproxy-0.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:20 +0000 UTC Normal Pod recreate-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-haproxy-1 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-xh3r default-scheduler
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:20 +0000 UTC Normal StatefulSet.apps recreate-haproxy SuccessfulCreate create Pod recreate-haproxy-1 in StatefulSet recreate-haproxy successful statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:21 +0000 UTC Normal Pod recreate-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:21 +0000 UTC Normal Pod recreate-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 232ms (232ms including waiting). Image size: 109002393 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:21 +0000 UTC Normal Pod recreate-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:21 +0000 UTC Normal Pod recreate-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:23 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:23 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 167ms (167ms including waiting). Image size: 105401394 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:23 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:23 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:23 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:23 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 188ms (188ms including waiting). Image size: 105401394 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:23 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:23 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:24 +0000 UTC Warning Pod recreate-haproxy-1.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:25 +0000 UTC Normal Pod recreate-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-haproxy-2 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-q2rd default-scheduler
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:25 +0000 UTC Normal Pod recreate-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:25 +0000 UTC Normal Pod recreate-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 151ms (151ms including waiting). Image size: 109002393 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:25 +0000 UTC Normal Pod recreate-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:25 +0000 UTC Normal Pod recreate-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:25 +0000 UTC Normal StatefulSet.apps recreate-haproxy SuccessfulCreate create Pod recreate-haproxy-2 in StatefulSet recreate-haproxy successful statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:26 +0000 UTC Normal Pod recreate-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-2d89d63d-47d5-4b44-8619-0bc2d0710e69" attachdetach-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:27 +0000 UTC Normal Pod recreate-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:27 +0000 UTC Normal Pod recreate-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 182ms (182ms including waiting). Image size: 109002393 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:27 +0000 UTC Normal Pod recreate-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:27 +0000 UTC Normal Pod recreate-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:28 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:28 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 121ms (121ms including waiting). Image size: 105401394 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:28 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:28 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:28 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:28 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 186ms (186ms including waiting). Image size: 105401394 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:28 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:28 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:29 +0000 UTC Warning Pod recreate-haproxy-2.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:30 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:30 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 183ms (183ms including waiting). Image size: 437180759 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:30 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:30 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:30 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:30 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 198ms (198ms including waiting). Image size: 428922326 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:30 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:30 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:30 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:30 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 244ms (244ms including waiting). Image size: 132941919 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:30 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:30 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:31 +0000 UTC Warning Pod recreate-haproxy-0.spec.containers{haproxy} Unhealthy Liveness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:36 +0000 UTC Warning Pod recreate-haproxy-1.spec.containers{haproxy} Unhealthy Liveness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:40 +0000 UTC Warning Pod recreate-haproxy-2.spec.containers{haproxy} Unhealthy Liveness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:40 +0000 UTC Normal Pod recreate-orc-2 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-orc-2 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-hpjc default-scheduler
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:40 +0000 UTC Normal Pod recreate-orc-2.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:40 +0000 UTC Normal StatefulSet.apps recreate-orc SuccessfulCreate create Pod recreate-orc-2 in StatefulSet recreate-orc successful statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:41 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{haproxy} Killing Container haproxy failed liveness probe, will be restarted kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:41 +0000 UTC Normal Pod recreate-orc-2.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 216ms (216ms including waiting). Image size: 109002393 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:41 +0000 UTC Normal Pod recreate-orc-2.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:41 +0000 UTC Normal Pod recreate-orc-2.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:42 +0000 UTC Warning Pod recreate-haproxy-0.spec.containers{haproxy} Unhealthy Readiness probe errored: rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 2483ef54db733148140fab133a403749323fd44c864d9b9ecd57188d0357b3bb not found: not found kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:42 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 148ms (148ms including waiting). Image size: 105401394 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:42 +0000 UTC Normal Pod recreate-orc-2.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:42 +0000 UTC Normal Pod recreate-orc-2.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 108ms (108ms including waiting). Image size: 72469809 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:42 +0000 UTC Normal Pod recreate-orc-2.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:42 +0000 UTC Normal Pod recreate-orc-2.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:42 +0000 UTC Normal Pod recreate-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:42 +0000 UTC Normal Pod recreate-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 97ms (97ms including waiting). Image size: 72469809 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:42 +0000 UTC Normal Pod recreate-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:42 +0000 UTC Normal Pod recreate-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:46 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{haproxy} Killing Container haproxy failed liveness probe, will be restarted kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:46 +0000 UTC Warning Pod recreate-haproxy-1.spec.containers{haproxy} Unhealthy Readiness probe failed: kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:46 +0000 UTC Warning Pod recreate-haproxy-1.spec.containers{haproxy} Unhealthy Readiness probe errored: rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task b4e9b1a8b09ca6eb0a4d28b3b8d79a15fcdc97c3a07d867ecb73ac0bb8bb374d not found: not found kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:46 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 214ms (214ms including waiting). Image size: 105401394 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:48 +0000 UTC Warning Pod recreate-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/08/26 20:31:47 Waiting for MySQL ready state
2025/08/26 20:31:47 MySQL is ready
2025/08/26 20:31:47 Peers: [6139323633346634.recreate-mysql-unready.kuttl-test-valid-filly 6162333461316231.recreate-mysql-unready.kuttl-test-valid-filly]
2025/08/26 20:31:47 FQDN: recreate-mysql-1.recreate-mysql.kuttl-test-valid-filly
2025/08/26 20:31:47 Primary: recreate-mysql-0.recreate-mysql.kuttl-test-valid-filly Replicas: [recreate-mysql-1.recreate-mysql.kuttl-test-valid-filly]
2025/08/26 20:31:47 lookup recreate-mysql-1 [10.155.248.17]
2025/08/26 20:31:47 PodIP: 10.155.248.17
2025/08/26 20:31:47 lookup recreate-mysql-0.recreate-mysql.kuttl-test-valid-filly [10.155.250.17]
2025/08/26 20:31:47 PrimaryIP: 10.155.250.17
2025/08/26 20:31:47 Donor: recreate-mysql-0.recreate-mysql.kuttl-test-valid-filly
2025/08/26 20:31:47 Opening connection to 10.155.248.17
2025/08/26 20:31:47 Clone required: true
2025/08/26 20:31:47 Checking if a clone in progress
2025/08/26 20:31:47 Clone in progress: false
2025/08/26 20:31:47 Cloning from recreate-mysql-0.recreate-mysql.kuttl-test-valid-filly
2025/08/26 20:31:48 Clone finished. Restarting container... kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:48 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:50 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{haproxy} Killing Container haproxy failed liveness probe, will be restarted kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:50 +0000 UTC Warning Pod recreate-haproxy-2.spec.containers{haproxy} Unhealthy Readiness probe errored: rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task c545205637917529561f6fb5bbd7a962b1c86d7311cc4b87ea88c8e449395047 not found: not found kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:50 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 256ms (256ms including waiting). Image size: 105401394 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:31:51 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 213ms (213ms including waiting). Image size: 437180759 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:23 +0000 UTC Normal PersistentVolumeClaim datadir-recreate-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:23 +0000 UTC Normal PersistentVolumeClaim datadir-recreate-mysql-2 ExternalProvisioning Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered. persistentvolume-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:23 +0000 UTC Normal PersistentVolumeClaim datadir-recreate-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-valid-filly/datadir-recreate-mysql-2" pd.csi.storage.gke.io_gke-f3dffe12ee5b4baeac9e-2fa9-90d7-vm_58e7fc8f-3c37-469c-9d46-f567ca1133f0
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:23 +0000 UTC Normal StatefulSet.apps recreate-mysql SuccessfulCreate create Claim datadir-recreate-mysql-2 Pod recreate-mysql-2 in StatefulSet recreate-mysql success statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:23 +0000 UTC Normal StatefulSet.apps recreate-mysql SuccessfulCreate create Pod recreate-mysql-2 in StatefulSet recreate-mysql successful statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:27 +0000 UTC Normal PersistentVolumeClaim datadir-recreate-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-e02bda6a-8fd3-4138-88b8-84ad02baa6f1 pd.csi.storage.gke.io_gke-f3dffe12ee5b4baeac9e-2fa9-90d7-vm_58e7fc8f-3c37-469c-9d46-f567ca1133f0
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:27 +0000 UTC Normal Pod recreate-mysql-2 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-mysql-2 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-q2rd default-scheduler
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:35 +0000 UTC Normal Pod recreate-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-e02bda6a-8fd3-4138-88b8-84ad02baa6f1" attachdetach-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:36 +0000 UTC Normal Pod recreate-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:36 +0000 UTC Normal Pod recreate-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 212ms (212ms including waiting). Image size: 109002393 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:36 +0000 UTC Normal Pod recreate-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:36 +0000 UTC Normal Pod recreate-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:38 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:38 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 208ms (208ms including waiting). Image size: 437180759 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:38 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:38 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:38 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:38 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 202ms (202ms including waiting). Image size: 428922326 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:38 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:38 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:38 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:39 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 192ms (192ms including waiting). Image size: 132941919 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:39 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:39 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:57 +0000 UTC Warning Pod recreate-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/08/26 20:32:56 Waiting for MySQL ready state
2025/08/26 20:32:56 MySQL is ready
2025/08/26 20:32:56 Peers: [3932323430623365.recreate-mysql-unready.kuttl-test-valid-filly 6139323633346634.recreate-mysql-unready.kuttl-test-valid-filly 6162333461316231.recreate-mysql-unready.kuttl-test-valid-filly]
2025/08/26 20:32:56 FQDN: recreate-mysql-2.recreate-mysql.kuttl-test-valid-filly
2025/08/26 20:32:56 Primary: recreate-mysql-0.recreate-mysql.kuttl-test-valid-filly Replicas: [recreate-mysql-1.recreate-mysql.kuttl-test-valid-filly recreate-mysql-2.recreate-mysql.kuttl-test-valid-filly]
2025/08/26 20:32:56 lookup recreate-mysql-2 [10.155.249.26]
2025/08/26 20:32:56 PodIP: 10.155.249.26
2025/08/26 20:32:56 lookup recreate-mysql-0.recreate-mysql.kuttl-test-valid-filly [10.155.250.17]
2025/08/26 20:32:56 PrimaryIP: 10.155.250.17
2025/08/26 20:32:56 Donor: recreate-mysql-1.recreate-mysql.kuttl-test-valid-filly
2025/08/26 20:32:56 Opening connection to 10.155.249.26
2025/08/26 20:32:56 Clone required: true
2025/08/26 20:32:56 Checking if a clone in progress
2025/08/26 20:32:56 Clone in progress: false
2025/08/26 20:32:56 Cloning from recreate-mysql-1.recreate-mysql.kuttl-test-valid-filly
2025/08/26 20:32:57 Clone finished. Restarting container... kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:32:57 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:33:00 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 203ms (203ms including waiting). Image size: 437180759 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:33:36 +0000 UTC Warning PerconaServerMySQL.ps.percona.com recreate ClusterStateChanged Initializing -> Ready ps-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:33:58 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:33:58 +0000 UTC Normal StatefulSet.apps recreate-mysql SuccessfulDelete delete Pod recreate-mysql-2 in StatefulSet recreate-mysql successful statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:33:59 +0000 UTC Normal StatefulSet.apps recreate-haproxy SuccessfulDelete delete Pod recreate-haproxy-2 in StatefulSet recreate-haproxy successful statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:33:59 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:33:59 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:33:59 +0000 UTC Normal Pod recreate-orc-2.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:33:59 +0000 UTC Normal Pod recreate-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:33:59 +0000 UTC Normal StatefulSet.apps recreate-orc SuccessfulDelete delete Pod recreate-orc-2 in StatefulSet recreate-orc successful statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:33:59 +0000 UTC Warning PerconaServerMySQL.ps.percona.com recreate ClusterStateChanged Ready -> Stopping ps-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:00 +0000 UTC Normal StatefulSet.apps recreate-haproxy SuccessfulDelete delete Pod recreate-haproxy-1 in StatefulSet recreate-haproxy successful statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:00 +0000 UTC Warning Pod recreate-orc-2.spec.containers{orchestrator} Unhealthy Readiness probe failed: Get "http://10.155.250.19:3000/api/health": dial tcp 10.155.250.19:3000: connect: connection refused kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:01 +0000 UTC Normal StatefulSet.apps recreate-haproxy SuccessfulDelete delete Pod recreate-haproxy-0 in StatefulSet recreate-haproxy successful statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:02 +0000 UTC Normal PodDisruptionBudget.policy recreate-haproxy NoPods No matching pods found controllermanager
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:02 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:02 +0000 UTC Normal StatefulSet.apps recreate-mysql SuccessfulDelete delete Pod recreate-mysql-1 in StatefulSet recreate-mysql successful statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:03 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:03 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:07 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:07 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:07 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:07 +0000 UTC Normal StatefulSet.apps recreate-mysql SuccessfulDelete delete Pod recreate-mysql-0 in StatefulSet recreate-mysql successful statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:07 +0000 UTC Warning PerconaServerMySQL.ps.percona.com recreate ClusterStateChanged Stopping -> Paused ps-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:09 +0000 UTC Warning Pod recreate-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/08/26 20:34:09 MySQL state is not ready... kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:14 +0000 UTC Warning Pod recreate-mysql-0.spec.containers{mysql} Unhealthy Readiness probe errored: rpc error: code = Unknown desc = failed to exec in container: container is in CONTAINER_EXITED state kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:24 +0000 UTC Normal Pod recreate-orc-1.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:24 +0000 UTC Normal Pod recreate-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:24 +0000 UTC Normal StatefulSet.apps recreate-orc SuccessfulDelete delete Pod recreate-orc-1 in StatefulSet recreate-orc successful statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:25 +0000 UTC Warning Pod recreate-orc-1.spec.containers{orchestrator} Unhealthy Readiness probe failed: Get "http://10.155.249.24:3000/api/health": dial tcp 10.155.249.24:3000: connect: connection refused kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:27 +0000 UTC Normal PodDisruptionBudget.policy recreate-mysql NoPods No matching pods found controllermanager
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:31 +0000 UTC Normal Pod recreate-orc-0.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:31 +0000 UTC Normal Pod recreate-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:31 +0000 UTC Normal StatefulSet.apps recreate-orc SuccessfulDelete delete Pod recreate-orc-0 in StatefulSet recreate-orc successful statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:38 +0000 UTC Normal Pod recreate-orc-0 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-orc-0 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-hpjc default-scheduler
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:38 +0000 UTC Warning PerconaServerMySQL.ps.percona.com recreate ClusterStateChanged Paused -> Initializing ps-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:39 +0000 UTC Normal Pod recreate-mysql-0 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-mysql-0 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-xh3r default-scheduler
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:39 +0000 UTC Normal Pod recreate-orc-0.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:39 +0000 UTC Normal Pod recreate-orc-0.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 240ms (240ms including waiting). Image size: 109002393 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:39 +0000 UTC Normal Pod recreate-orc-0.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:39 +0000 UTC Normal Pod recreate-orc-0.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:41 +0000 UTC Normal Pod recreate-orc-0.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:41 +0000 UTC Normal Pod recreate-orc-0.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 193ms (193ms including waiting). Image size: 72469809 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:41 +0000 UTC Normal Pod recreate-orc-0.spec.containers{orchestrator} Created Created container: orchestrator kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:42 +0000 UTC Normal Pod recreate-orc-0.spec.containers{orchestrator} Started Started container orchestrator kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:42 +0000 UTC Normal Pod recreate-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:42 +0000 UTC Normal Pod recreate-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 203ms (203ms including waiting). Image size: 72469809 bytes. kubelet
kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:42 +0000 UTC Normal Pod recreate-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:42 +0000 UTC Normal Pod recreate-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:46 +0000 UTC Normal Pod recreate-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-fd988da2-d5bb-4035-8a7d-9fcb74c86699" attachdetach-controller logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:50 +0000 UTC Normal Pod recreate-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:50 +0000 UTC Normal Pod recreate-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 375ms (375ms including waiting). Image size: 109002393 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:50 +0000 UTC Normal Pod recreate-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:50 +0000 UTC Normal Pod recreate-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:52 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:52 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 185ms (185ms including waiting). Image size: 437180759 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:52 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:52 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:52 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:52 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 226ms (226ms including waiting). Image size: 428922326 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:53 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:53 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:53 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:53 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 170ms (170ms including waiting). Image size: 132941919 bytes. 
kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:53 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:34:53 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:14 +0000 UTC Normal Pod recreate-orc-1 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-orc-1 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-q2rd default-scheduler logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:15 +0000 UTC Normal Pod recreate-orc-1.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:15 +0000 UTC Normal Pod recreate-orc-1.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 213ms (213ms including waiting). Image size: 109002393 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:15 +0000 UTC Normal Pod recreate-orc-1.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:15 +0000 UTC Normal Pod recreate-orc-1.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:17 +0000 UTC Normal Pod recreate-orc-1.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:17 +0000 UTC Normal Pod recreate-orc-1.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 200ms (200ms including waiting). Image size: 72469809 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:17 +0000 UTC Normal Pod recreate-orc-1.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:17 +0000 UTC Normal Pod recreate-orc-1.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:17 +0000 UTC Normal Pod recreate-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:18 +0000 UTC Normal Pod recreate-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 172ms (172ms including waiting). Image size: 72469809 bytes. 
kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:18 +0000 UTC Normal Pod recreate-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:18 +0000 UTC Normal Pod recreate-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:25 +0000 UTC Normal Pod recreate-mysql-1 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-mysql-1 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-hpjc default-scheduler logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:30 +0000 UTC Normal Pod recreate-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-haproxy-0 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-hpjc default-scheduler logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:30 +0000 UTC Normal Pod recreate-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:31 +0000 UTC Normal Pod recreate-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 202ms (202ms including waiting). Image size: 109002393 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:31 +0000 UTC Normal Pod recreate-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:31 +0000 UTC Normal Pod recreate-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:32 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:32 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 144ms (144ms including waiting). Image size: 105401394 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:32 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:32 +0000 UTC Normal Pod recreate-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-2d89d63d-47d5-4b44-8619-0bc2d0710e69" attachdetach-controller logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:33 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:33 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:33 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 196ms (196ms including waiting). Image size: 105401394 bytes. 
kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:33 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:33 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:33 +0000 UTC Warning Pod recreate-haproxy-0.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:34 +0000 UTC Normal Pod recreate-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:34 +0000 UTC Normal Pod recreate-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 227ms (227ms including waiting). Image size: 109002393 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:34 +0000 UTC Normal Pod recreate-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:34 +0000 UTC Normal Pod recreate-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:35 +0000 UTC Normal Pod recreate-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-haproxy-1 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-xh3r default-scheduler logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:35 +0000 UTC Normal Pod recreate-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:35 +0000 UTC Normal Pod recreate-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 222ms (223ms including waiting). Image size: 109002393 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:35 +0000 UTC Normal Pod recreate-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:35 +0000 UTC Normal Pod recreate-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:35 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:36 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 200ms (200ms including waiting). Image size: 437180759 bytes. 
kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:36 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:36 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:36 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:36 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 188ms (188ms including waiting). Image size: 428922326 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:36 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:36 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:36 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:36 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 192ms (192ms including waiting). Image size: 132941919 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:36 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:36 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:37 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:37 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 188ms (188ms including waiting). Image size: 105401394 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:37 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:37 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:37 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:37 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 221ms (221ms including waiting). Image size: 105401394 bytes. 
kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:38 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:38 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:38 +0000 UTC Warning Pod recreate-haproxy-1.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:39 +0000 UTC Normal Pod recreate-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-haproxy-2 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-q2rd default-scheduler logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:40 +0000 UTC Normal Pod recreate-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:40 +0000 UTC Normal Pod recreate-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 247ms (247ms including waiting). Image size: 109002393 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:40 +0000 UTC Normal Pod recreate-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:40 +0000 UTC Normal Pod recreate-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:42 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:42 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 185ms (185ms including waiting). Image size: 105401394 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:42 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:42 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:42 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:43 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 182ms (182ms including waiting). Image size: 105401394 bytes. 
kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:43 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:43 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:43 +0000 UTC Warning Pod recreate-haproxy-2.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:45 +0000 UTC Warning Pod recreate-haproxy-0.spec.containers{haproxy} Unhealthy Liveness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:50 +0000 UTC Warning Pod recreate-haproxy-1.spec.containers{haproxy} Unhealthy Liveness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:50 +0000 UTC Normal Pod recreate-orc-2 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-orc-2 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-xh3r default-scheduler logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:51 +0000 UTC Normal Pod recreate-orc-2.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:51 +0000 UTC Normal Pod recreate-orc-2.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 202ms (202ms including waiting). Image size: 109002393 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:51 +0000 UTC Normal Pod recreate-orc-2.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:51 +0000 UTC Normal Pod recreate-orc-2.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:52 +0000 UTC Normal Pod recreate-orc-2.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:52 +0000 UTC Normal Pod recreate-orc-2.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 209ms (209ms including waiting). Image size: 72469809 bytes. 
kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:52 +0000 UTC Normal Pod recreate-orc-2.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:52 +0000 UTC Normal Pod recreate-orc-2.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:52 +0000 UTC Normal Pod recreate-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:53 +0000 UTC Normal Pod recreate-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 212ms (212ms including waiting). Image size: 72469809 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:53 +0000 UTC Normal Pod recreate-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:53 +0000 UTC Normal Pod recreate-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:55 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{haproxy} Killing Container haproxy failed liveness probe, will be restarted kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:55 +0000 UTC Warning Pod recreate-haproxy-0.spec.containers{haproxy} Unhealthy Readiness probe errored: rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 35de504a1bd44f8ffa8cae79990420e5449d5eb0b0f8ddf75e338df04e589c8b not found: not found kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:55 +0000 UTC Warning Pod recreate-haproxy-2.spec.containers{haproxy} Unhealthy Liveness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:55 +0000 UTC Warning Pod recreate-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/08/26 20:35:54 Waiting for MySQL ready state 2025/08/26 20:35:54 MySQL is ready 2025/08/26 20:35:54 Peers: [3830393836323865.recreate-mysql-unready.kuttl-test-valid-filly 6262656636353930.recreate-mysql-unready.kuttl-test-valid-filly] 2025/08/26 20:35:54 FQDN: recreate-mysql-1.recreate-mysql.kuttl-test-valid-filly 2025/08/26 20:35:54 Primary: recreate-mysql-0.recreate-mysql.kuttl-test-valid-filly Replicas: [recreate-mysql-1.recreate-mysql.kuttl-test-valid-filly] 2025/08/26 20:35:54 lookup recreate-mysql-1 [10.155.250.22] 2025/08/26 20:35:54 PodIP: 10.155.250.22 2025/08/26 20:35:54 lookup recreate-mysql-0.recreate-mysql.kuttl-test-valid-filly [10.155.248.18] 2025/08/26 20:35:54 PrimaryIP: 10.155.248.18 2025/08/26 20:35:54 Donor: recreate-mysql-0.recreate-mysql.kuttl-test-valid-filly 2025/08/26 20:35:54 Opening connection to 10.155.250.22 2025/08/26 20:35:54 Clone required: true 2025/08/26 20:35:54 Checking if a clone in progress 2025/08/26 20:35:54 Clone in progress: false 2025/08/26 20:35:54 Cloning from recreate-mysql-0.recreate-mysql.kuttl-test-valid-filly 2025/08/26 20:35:55 Clone finished. Restarting container... 
kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:55 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:56 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 190ms (190ms including waiting). Image size: 105401394 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:35:58 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 187ms (187ms including waiting). Image size: 437180759 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:00 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{haproxy} Killing Container haproxy failed liveness probe, will be restarted kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:00 +0000 UTC Warning Pod recreate-haproxy-1.spec.containers{haproxy} Unhealthy Readiness probe errored: rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 4bd1eb31bab12b3e2fdea8bfa10867a004c84842bb121ffddfbd59d48c1c3cc9 not found: not found kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:00 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 171ms (171ms including waiting). Image size: 105401394 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:05 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{haproxy} Killing Container haproxy failed liveness probe, will be restarted kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:05 +0000 UTC Warning Pod recreate-haproxy-2.spec.containers{haproxy} Unhealthy Readiness probe errored: rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 75b8f1c64ca9b83d044bea7430ffe4fa8ed5347fd8ba65c959d72aba1955b7e2 not found: not found kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:05 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 210ms (210ms including waiting). Image size: 105401394 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:29 +0000 UTC Normal Pod recreate-mysql-2 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-mysql-2 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-q2rd default-scheduler logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:36 +0000 UTC Normal Pod recreate-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-e02bda6a-8fd3-4138-88b8-84ad02baa6f1" attachdetach-controller logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:38 +0000 UTC Normal Pod recreate-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:38 +0000 UTC Normal Pod recreate-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 232ms (232ms including waiting). Image size: 109002393 bytes. 
kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:38 +0000 UTC Normal Pod recreate-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:38 +0000 UTC Normal Pod recreate-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:40 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:41 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 222ms (222ms including waiting). Image size: 437180759 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:41 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:41 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:41 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:41 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 253ms (253ms including waiting). Image size: 428922326 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:41 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:41 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:41 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:41 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 164ms (164ms including waiting). Image size: 132941919 bytes. 
kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:41 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:41 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:59 +0000 UTC Warning Pod recreate-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/08/26 20:36:58 Waiting for MySQL ready state 2025/08/26 20:36:58 MySQL is ready 2025/08/26 20:36:58 Peers: [3830393836323865.recreate-mysql-unready.kuttl-test-valid-filly 3930616132336531.recreate-mysql-unready.kuttl-test-valid-filly 6262656636353930.recreate-mysql-unready.kuttl-test-valid-filly] 2025/08/26 20:36:58 FQDN: recreate-mysql-2.recreate-mysql.kuttl-test-valid-filly 2025/08/26 20:36:58 Primary: recreate-mysql-0.recreate-mysql.kuttl-test-valid-filly Replicas: [recreate-mysql-1.recreate-mysql.kuttl-test-valid-filly recreate-mysql-2.recreate-mysql.kuttl-test-valid-filly] 2025/08/26 20:36:58 lookup recreate-mysql-2 [10.155.249.29] 2025/08/26 20:36:58 PodIP: 10.155.249.29 2025/08/26 20:36:58 lookup recreate-mysql-0.recreate-mysql.kuttl-test-valid-filly [10.155.248.18] 2025/08/26 20:36:58 PrimaryIP: 10.155.248.18 2025/08/26 20:36:58 Donor: recreate-mysql-1.recreate-mysql.kuttl-test-valid-filly 2025/08/26 20:36:58 Opening connection to 10.155.249.29 2025/08/26 20:36:58 Clone required: true 2025/08/26 20:36:58 Checking if a clone in progress 2025/08/26 20:36:58 Clone in progress: false 2025/08/26 20:36:58 Cloning from recreate-mysql-1.recreate-mysql.kuttl-test-valid-filly 2025/08/26 20:36:59 Clone finished. Restarting container... kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:36:59 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:37:02 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 203ms (203ms including waiting). Image size: 437180759 bytes. 
kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:37:54 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:37:54 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:37:54 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:37:59 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:37:59 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:37:59 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:10 +0000 UTC Normal Pod recreate-orc-0.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:10 +0000 UTC Normal Pod recreate-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:10 +0000 UTC Normal Pod recreate-orc-2.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:11 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:11 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:11 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:11 +0000 UTC Warning PodDisruptionBudget.policy recreate-mysql CalculateExpectedPodCountFailed Failed to calculate the number of expected pods: found no controllers for pod "recreate-mysql-0" controllermanager logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:11 +0000 UTC Normal Pod recreate-orc-1.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:11 +0000 UTC Normal Pod recreate-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:11 +0000 UTC Normal Pod recreate-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:11 +0000 UTC Warning Pod recreate-orc-2.spec.containers{orchestrator} Unhealthy Readiness probe failed: Get "http://10.155.248.20:3000/api/health": dial tcp 10.155.248.20:3000: connect: connection refused kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:14 +0000 UTC Warning Pod recreate-orc-0.spec.containers{orchestrator} Unhealthy Readiness probe failed: Get "http://10.155.250.20:3000/api/health": dial tcp 10.155.250.20:3000: connect: connection refused kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:15 +0000 UTC Warning Pod recreate-mysql-0.spec.containers{mysql} Unhealthy Readiness probe errored: rpc error: code = Unknown desc = failed to exec in container: container is in 
CONTAINER_EXITED state kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:16 +0000 UTC Warning PodDisruptionBudget.policy recreate-mysql CalculateExpectedPodCountFailed Failed to calculate the number of expected pods: found no controllers for pod "recreate-mysql-0" controllermanager logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:16 +0000 UTC Normal Pod recreate-orc-0 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-orc-0 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-hpjc default-scheduler logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:16 +0000 UTC Normal StatefulSet.apps recreate-orc SuccessfulCreate create Pod recreate-orc-0 in StatefulSet recreate-orc successful statefulset-controller logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:16 +0000 UTC Warning PodDisruptionBudget.policy recreate-orchestrator CalculateExpectedPodCountFailed Failed to calculate the number of expected pods: found no controllers for pod "recreate-orc-2" controllermanager logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:16 +0000 UTC Warning PerconaServerMySQL.ps.percona.com recreate ClusterStateChanged Error -> Initializing ps-controller logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:17 +0000 UTC Normal Pod recreate-orc-0.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:17 +0000 UTC Normal Pod recreate-orc-0.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 167ms (167ms including waiting). Image size: 109002393 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:17 +0000 UTC Normal Pod recreate-orc-0.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:17 +0000 UTC Normal Pod recreate-orc-0.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:19 +0000 UTC Normal Pod recreate-orc-0.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:19 +0000 UTC Normal Pod recreate-orc-0.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 130ms (130ms including waiting). Image size: 72469809 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:19 +0000 UTC Normal Pod recreate-orc-0.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:19 +0000 UTC Normal Pod recreate-orc-0.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:19 +0000 UTC Normal Pod recreate-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:19 +0000 UTC Normal Pod recreate-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 186ms (186ms including waiting). Image size: 72469809 bytes. 
kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:19 +0000 UTC Normal Pod recreate-orc-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:19 +0000 UTC Normal Pod recreate-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:31 +0000 UTC Normal PodDisruptionBudget.policy recreate-mysql NoPods No matching pods found controllermanager logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:36 +0000 UTC Normal Pod recreate-mysql-0 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-mysql-0 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-xh3r default-scheduler logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:36 +0000 UTC Normal StatefulSet.apps recreate-mysql SuccessfulCreate create Pod recreate-mysql-0 in StatefulSet recreate-mysql successful statefulset-controller logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:50 +0000 UTC Normal Pod recreate-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-fd988da2-d5bb-4035-8a7d-9fcb74c86699" attachdetach-controller logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:51 +0000 UTC Normal Pod recreate-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:52 +0000 UTC Normal Pod recreate-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 196ms (196ms including waiting). Image size: 109002393 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:52 +0000 UTC Normal Pod recreate-mysql-0.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:52 +0000 UTC Normal Pod recreate-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:53 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:53 +0000 UTC Normal Pod recreate-orc-1 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-orc-1 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-q2rd default-scheduler logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:53 +0000 UTC Normal Pod recreate-orc-1.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:53 +0000 UTC Normal Pod recreate-orc-1.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 208ms (208ms including waiting). Image size: 109002393 bytes. 
kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:53 +0000 UTC Normal Pod recreate-orc-1.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:53 +0000 UTC Normal Pod recreate-orc-1.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:53 +0000 UTC Normal StatefulSet.apps recreate-orc SuccessfulCreate create Pod recreate-orc-1 in StatefulSet recreate-orc successful statefulset-controller logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:54 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 196ms (196ms including waiting). Image size: 437180759 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:54 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{mysql} Created Created container: mysql kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:54 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:54 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:54 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 196ms (196ms including waiting). Image size: 428922326 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:54 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:54 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:54 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:54 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 194ms (194ms including waiting). Image size: 132941919 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:54 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:54 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:56 +0000 UTC Normal Pod recreate-orc-1.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:56 +0000 UTC Normal Pod recreate-orc-1.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 200ms (200ms including waiting). Image size: 72469809 bytes. 
kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:56 +0000 UTC Normal Pod recreate-orc-1.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:56 +0000 UTC Normal Pod recreate-orc-1.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:56 +0000 UTC Normal Pod recreate-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:56 +0000 UTC Normal Pod recreate-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 199ms (199ms including waiting). Image size: 72469809 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:56 +0000 UTC Normal Pod recreate-orc-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:38:56 +0000 UTC Normal Pod recreate-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:27 +0000 UTC Normal Pod recreate-mysql-1 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-mysql-1 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-hpjc default-scheduler logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:27 +0000 UTC Normal StatefulSet.apps recreate-mysql SuccessfulCreate create Pod recreate-mysql-1 in StatefulSet recreate-mysql successful statefulset-controller logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:28 +0000 UTC Normal Pod recreate-orc-2 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-orc-2 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-xh3r default-scheduler logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:28 +0000 UTC Normal StatefulSet.apps recreate-orc SuccessfulCreate create Pod recreate-orc-2 in StatefulSet recreate-orc successful statefulset-controller logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:29 +0000 UTC Normal Pod recreate-orc-2.spec.initContainers{orchestrator-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:29 +0000 UTC Normal Pod recreate-orc-2.spec.initContainers{orchestrator-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 173ms (173ms including waiting). Image size: 109002393 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:29 +0000 UTC Normal Pod recreate-orc-2.spec.initContainers{orchestrator-init} Created Created container: orchestrator-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:29 +0000 UTC Normal Pod recreate-orc-2.spec.initContainers{orchestrator-init} Started Started container orchestrator-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:30 +0000 UTC Normal Pod recreate-orc-2.spec.containers{orchestrator} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:31 +0000 UTC Normal Pod recreate-orc-2.spec.containers{orchestrator} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 188ms (188ms including waiting). Image size: 72469809 bytes. 
kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:31 +0000 UTC Normal Pod recreate-orc-2.spec.containers{orchestrator} Created Created container: orchestrator kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:31 +0000 UTC Normal Pod recreate-orc-2.spec.containers{orchestrator} Started Started container orchestrator kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:31 +0000 UTC Normal Pod recreate-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:31 +0000 UTC Normal Pod recreate-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 217ms (217ms including waiting). Image size: 72469809 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:31 +0000 UTC Normal Pod recreate-orc-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:31 +0000 UTC Normal Pod recreate-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:33 +0000 UTC Normal Pod recreate-haproxy-0 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-haproxy-0 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-hpjc default-scheduler logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:33 +0000 UTC Normal Pod recreate-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:33 +0000 UTC Normal StatefulSet.apps recreate-haproxy SuccessfulCreate create Pod recreate-haproxy-0 in StatefulSet recreate-haproxy successful statefulset-controller logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:34 +0000 UTC Normal Pod recreate-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 190ms (191ms including waiting). Image size: 109002393 bytes. kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:34 +0000 UTC Normal Pod recreate-haproxy-0.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:34 +0000 UTC Normal Pod recreate-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:34 +0000 UTC Normal Pod recreate-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-2d89d63d-47d5-4b44-8619-0bc2d0710e69" attachdetach-controller logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:35 +0000 UTC Normal Pod recreate-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:35 +0000 UTC Normal Pod recreate-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 169ms (169ms including waiting). Image size: 109002393 bytes. 
kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:35 +0000 UTC Normal Pod recreate-mysql-1.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:35 +0000 UTC Normal Pod recreate-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:36 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:37 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 980ms (980ms including waiting). Image size: 105401394 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:37 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:37 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:37 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:37 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 180ms (180ms including waiting). Image size: 105401394 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:37 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:37 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:38 +0000 UTC Warning Pod recreate-haproxy-0.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:38 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:38 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 191ms (191ms including waiting). Image size: 437180759 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:38 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:38 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:38 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:38 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 217ms (217ms including waiting). Image size: 428922326 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:38 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:38 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:38 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:39 +0000 UTC Normal Pod recreate-haproxy-1 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-haproxy-1 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-xh3r default-scheduler
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:39 +0000 UTC Normal Pod recreate-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:39 +0000 UTC Normal Pod recreate-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 210ms (210ms including waiting). Image size: 109002393 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:39 +0000 UTC Normal Pod recreate-haproxy-1.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:39 +0000 UTC Normal Pod recreate-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:39 +0000 UTC Normal StatefulSet.apps recreate-haproxy SuccessfulCreate create Pod recreate-haproxy-1 in StatefulSet recreate-haproxy successful statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:39 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 212ms (212ms including waiting). Image size: 132941919 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:39 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:39 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:41 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:42 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 132ms (132ms including waiting). Image size: 105401394 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:42 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:42 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:42 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:42 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 153ms (153ms including waiting). Image size: 105401394 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:42 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:42 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:42 +0000 UTC Warning Pod recreate-haproxy-1.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:44 +0000 UTC Normal Pod recreate-haproxy-2 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-haproxy-2 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-q2rd default-scheduler
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:44 +0000 UTC Normal Pod recreate-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:44 +0000 UTC Normal Pod recreate-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 218ms (218ms including waiting). Image size: 109002393 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:44 +0000 UTC Normal Pod recreate-haproxy-2.spec.initContainers{haproxy-init} Created Created container: haproxy-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:44 +0000 UTC Normal Pod recreate-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:44 +0000 UTC Normal StatefulSet.apps recreate-haproxy SuccessfulCreate create Pod recreate-haproxy-2 in StatefulSet recreate-haproxy successful statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:46 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:46 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 159ms (159ms including waiting). Image size: 105401394 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:46 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{haproxy} Created Created container: haproxy kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:46 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:46 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:46 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 137ms (137ms including waiting). Image size: 105401394 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:46 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{mysql-monit} Created Created container: mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:46 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:47 +0000 UTC Warning Pod recreate-haproxy-2.spec.containers{haproxy} Unhealthy Readiness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:48 +0000 UTC Warning Pod recreate-haproxy-0.spec.containers{haproxy} Unhealthy Liveness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:54 +0000 UTC Warning Pod recreate-haproxy-1.spec.containers{haproxy} Unhealthy Liveness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:56 +0000 UTC Warning Pod recreate-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: 2025/08/26 20:39:55 Waiting for MySQL ready state 2025/08/26 20:39:55 MySQL is ready 2025/08/26 20:39:55 Peers: [3637343639366337.recreate-mysql-unready.kuttl-test-valid-filly 66363232653365.recreate-mysql-unready.kuttl-test-valid-filly] 2025/08/26 20:39:55 FQDN: recreate-mysql-1.recreate-mysql.kuttl-test-valid-filly 2025/08/26 20:39:55 Primary: recreate-mysql-0.recreate-mysql.kuttl-test-valid-filly Replicas: [recreate-mysql-1.recreate-mysql.kuttl-test-valid-filly] 2025/08/26 20:39:55 lookup recreate-mysql-1 [10.155.250.25] 2025/08/26 20:39:55 PodIP: 10.155.250.25 2025/08/26 20:39:55 lookup recreate-mysql-0.recreate-mysql.kuttl-test-valid-filly [10.155.248.21] 2025/08/26 20:39:55 PrimaryIP: 10.155.248.21 2025/08/26 20:39:55 Donor: recreate-mysql-0.recreate-mysql.kuttl-test-valid-filly 2025/08/26 20:39:55 Opening connection to 10.155.250.25 2025/08/26 20:39:55 Clone required: true 2025/08/26 20:39:55 Checking if a clone in progress 2025/08/26 20:39:55 Clone in progress: false 2025/08/26 20:39:55 Cloning from recreate-mysql-0.recreate-mysql.kuttl-test-valid-filly 2025/08/26 20:39:56 Clone finished. Restarting container... kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:56 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:59 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{haproxy} Killing Container haproxy failed liveness probe, will be restarted kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:59 +0000 UTC Warning Pod recreate-haproxy-0.spec.containers{haproxy} Unhealthy Readiness probe errored: rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 72b3ccf91ec753a7270f41ba6d7aaec570cc98eed1b8477ada2016b345fc8337 not found: not found kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:59 +0000 UTC Normal Pod recreate-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 162ms (162ms including waiting). Image size: 105401394 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:39:59 +0000 UTC Warning Pod recreate-haproxy-2.spec.containers{haproxy} Unhealthy Liveness probe failed: ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:00 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 170ms (170ms including waiting). Image size: 437180759 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:04 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{haproxy} Killing Container haproxy failed liveness probe, will be restarted kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:04 +0000 UTC Warning Pod recreate-haproxy-1.spec.containers{haproxy} Unhealthy Readiness probe errored: rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 151fd76b780396605ee274d11a88866d10d22044ee4a54b648316b29513b740e not found: not found kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:04 +0000 UTC Normal Pod recreate-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 202ms (202ms including waiting). Image size: 105401394 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:09 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{haproxy} Killing Container haproxy failed liveness probe, will be restarted kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:09 +0000 UTC Normal Pod recreate-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 209ms (209ms including waiting). Image size: 105401394 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:10 +0000 UTC Warning Pod recreate-haproxy-2.spec.containers{haproxy} Unhealthy Readiness probe errored: rpc error: code = Unknown desc = failed to exec in container: container is in CONTAINER_EXITED state kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:31 +0000 UTC Normal Pod recreate-mysql-2 Binding Scheduled Successfully assigned kuttl-test-valid-filly/recreate-mysql-2 to gke-jen-ps-993-bd4cc52b--default-pool-a7052371-q2rd default-scheduler
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:31 +0000 UTC Normal StatefulSet.apps recreate-mysql SuccessfulCreate create Pod recreate-mysql-2 in StatefulSet recreate-mysql successful statefulset-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:39 +0000 UTC Normal Pod recreate-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-e02bda6a-8fd3-4138-88b8-84ad02baa6f1" attachdetach-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:41 +0000 UTC Normal Pod recreate-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:41 +0000 UTC Normal Pod recreate-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-993-bd4cc52b" in 157ms (157ms including waiting). Image size: 109002393 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:41 +0000 UTC Normal Pod recreate-mysql-2.spec.initContainers{mysql-init} Created Created container: mysql-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:41 +0000 UTC Normal Pod recreate-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:43 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql8.0" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:43 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 219ms (219ms including waiting). Image size: 437180759 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:43 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{mysql} Created Created container: mysql kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:43 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:43 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup8.0" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:43 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup8.0" in 220ms (220ms including waiting). Image size: 428922326 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:43 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{xtrabackup} Created Created container: xtrabackup kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:43 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:43 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:44 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 194ms (194ms including waiting). Image size: 132941919 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:44 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{pt-heartbeat} Created Created container: pt-heartbeat kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:40:44 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:41:01 +0000 UTC Warning Pod recreate-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: 2025/08/26 20:41:00 Waiting for MySQL ready state 2025/08/26 20:41:00 MySQL is ready 2025/08/26 20:41:00 Peers: [3637343639366337.recreate-mysql-unready.kuttl-test-valid-filly 6232313837323039.recreate-mysql-unready.kuttl-test-valid-filly 66363232653365.recreate-mysql-unready.kuttl-test-valid-filly] 2025/08/26 20:41:00 FQDN: recreate-mysql-2.recreate-mysql.kuttl-test-valid-filly 2025/08/26 20:41:00 Primary: recreate-mysql-0.recreate-mysql.kuttl-test-valid-filly Replicas: [recreate-mysql-1.recreate-mysql.kuttl-test-valid-filly recreate-mysql-2.recreate-mysql.kuttl-test-valid-filly] 2025/08/26 20:41:00 lookup recreate-mysql-2 [10.155.249.32] 2025/08/26 20:41:00 PodIP: 10.155.249.32 2025/08/26 20:41:00 lookup recreate-mysql-0.recreate-mysql.kuttl-test-valid-filly [10.155.248.21] 2025/08/26 20:41:00 PrimaryIP: 10.155.248.21 2025/08/26 20:41:00 Donor: recreate-mysql-1.recreate-mysql.kuttl-test-valid-filly 2025/08/26 20:41:00 Opening connection to 10.155.249.32 2025/08/26 20:41:01 Clone required: true 2025/08/26 20:41:01 Checking if a clone in progress 2025/08/26 20:41:01 Clone in progress: false 2025/08/26 20:41:01 Cloning from recreate-mysql-1.recreate-mysql.kuttl-test-valid-filly 2025/08/26 20:41:01 Clone finished. Restarting container... kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:41:01 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:41:05 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql8.0" in 213ms (213ms including waiting). Image size: 437180759 bytes. kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:41:38 +0000 UTC Warning PerconaServerMySQL.ps.percona.com recreate ClusterStateChanged Initializing -> Ready ps-controller
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:41:54 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:41:54 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:41:54 +0000 UTC Normal Pod recreate-mysql-0.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:41:54 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:41:54 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:41:54 +0000 UTC Normal Pod recreate-mysql-1.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:41:54 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{xtrabackup} Killing Stopping container xtrabackup kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:41:54 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{mysql} Killing Stopping container mysql kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:41:54 +0000 UTC Normal Pod recreate-mysql-2.spec.containers{pt-heartbeat} Killing Stopping container pt-heartbeat kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:41:54 +0000 UTC Normal Pod recreate-orc-0.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:41:54 +0000 UTC Normal Pod recreate-orc-0.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:41:54 +0000 UTC Normal Pod recreate-orc-1.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:41:54 +0000 UTC Normal Pod recreate-orc-1.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:41:54 +0000 UTC Normal Pod recreate-orc-2.spec.containers{orchestrator} Killing Stopping container orchestrator kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:41:54 +0000 UTC Normal Pod recreate-orc-2.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:41:54 +0000 UTC Warning PodDisruptionBudget.policy recreate-orchestrator CalculateExpectedPodCountFailed Failed to calculate the number of expected pods: found no controllers for pod "recreate-orc-0" controllermanager
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:41:56 +0000 UTC Warning Pod recreate-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/08/26 20:41:56 MySQL state is not ready... kubelet
logger.go:42: 20:42:02 | recreate | 2025-08-26 20:42:01 +0000 UTC Warning Pod recreate-mysql-0.spec.containers{mysql} Unhealthy Readiness probe failed: 2025/08/26 20:42:01 MySQL state is not ready... kubelet
logger.go:42: 20:42:02 | recreate | Deleting namespace: kuttl-test-valid-filly
=== NAME  kuttl
    harness.go:403: run tests finished
    harness.go:510: cleaning up
    harness.go:567: removing temp folder: ""
--- PASS: kuttl (749.96s)
    --- PASS: kuttl/harness (0.00s)
        --- PASS: kuttl/harness/recreate (749.24s)
PASS