=== RUN   kuttl
    harness.go:464: starting setup
    harness.go:255: running tests using configured kubeconfig.
    harness.go:278: Successful connection to cluster at: https://34.57.56.84
    harness.go:363: running tests
    harness.go:75: going to run test suite with timeout of 180 seconds for each step
    harness.go:375: testsuite: e2e-tests/tests has 34 tests
=== RUN   kuttl/harness
=== RUN   kuttl/harness/operator-self-healing
=== PAUSE kuttl/harness/operator-self-healing
=== CONT  kuttl/harness/operator-self-healing
    logger.go:42: 17:37:32 | operator-self-healing | Creating namespace: kuttl-test-loving-spider
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | starting test step 0-deploy-operator
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | running command: [sh -c set -o errexit
        set -o xtrace
        
        source ../../functions
        init_temp_dir # do this only in the first TestStep
        
        deploy_operator
        deploy_non_tls_cluster_secrets
        deploy_tls_cluster_secrets
        deploy_client]
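
The step script above sources the shared e2e helpers and deploys the operator plus both cluster secrets and a client pod. A hypothetical standalone rerun of just this test, assuming the kubectl-kuttl plugin is installed and the command is issued from the repo root:

        kubectl kuttl test e2e-tests/tests --test operator-self-healing --timeout 180
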
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | + source ../../functions
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ realpath ../../..
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | ++++ pwd
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/tests/operator-self-healing
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | ++ test_name=operator-self-healing
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/vars.sh
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ export GIT_BRANCH=PR-825
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ GIT_BRANCH=PR-825
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ export VERSION=PR-825-808887c6
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ VERSION=PR-825-808887c6
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ export PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ export CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | ++++ which gdate
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-825/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | ++++ which date
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ date=/usr/bin/date
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ oc get projects
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ :
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ kubectl get nodes
    logger.go:42: 17:37:32 | operator-self-healing/0-deploy-operator | +++ grep '^minikube'
    logger.go:42: 17:37:33 | operator-self-healing/0-deploy-operator | + init_temp_dir
    logger.go:42: 17:37:33 | operator-self-healing/0-deploy-operator | + rm -rf /tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:37:33 | operator-self-healing/0-deploy-operator | + mkdir -p /tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:37:33 | operator-self-healing/0-deploy-operator | + deploy_operator
    logger.go:42: 17:37:33 | operator-self-healing/0-deploy-operator | + destroy_operator
    logger.go:42: 17:37:33 | operator-self-healing/0-deploy-operator | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
    logger.go:42: 17:37:33 | operator-self-healing/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
    logger.go:42: 17:37:33 | operator-self-healing/0-deploy-operator | Error from server (NotFound): deployments.apps "percona-server-mysql-operator" not found
    logger.go:42: 17:37:33 | operator-self-healing/0-deploy-operator | + true
    logger.go:42: 17:37:33 | operator-self-healing/0-deploy-operator | + [[ -n ps-operator ]]
    logger.go:42: 17:37:33 | operator-self-healing/0-deploy-operator | + kubectl delete namespace ps-operator --force --grace-period=0
    logger.go:42: 17:37:33 | operator-self-healing/0-deploy-operator | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
    logger.go:42: 17:37:33 | operator-self-healing/0-deploy-operator | Error from server (NotFound): namespaces "ps-operator" not found
    logger.go:42: 17:37:33 | operator-self-healing/0-deploy-operator | + true
    logger.go:42: 17:37:33 | operator-self-healing/0-deploy-operator | + [[ -n ps-operator ]]
    logger.go:42: 17:37:33 | operator-self-healing/0-deploy-operator | + create_namespace ps-operator
    logger.go:42: 17:37:33 | operator-self-healing/0-deploy-operator | + local namespace=ps-operator
    logger.go:42: 17:37:33 | operator-self-healing/0-deploy-operator | + [[ -n '' ]]
    logger.go:42: 17:37:33 | operator-self-healing/0-deploy-operator | + kubectl delete namespace ps-operator --ignore-not-found
    logger.go:42: 17:37:34 | operator-self-healing/0-deploy-operator | + kubectl wait --for=delete namespace ps-operator
    logger.go:42: 17:37:34 | operator-self-healing/0-deploy-operator | + kubectl create namespace ps-operator
    logger.go:42: 17:37:35 | operator-self-healing/0-deploy-operator | namespace/ps-operator created
    logger.go:42: 17:37:35 | operator-self-healing/0-deploy-operator | + kubectl -n ps-operator apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy/crd.yaml
    logger.go:42: 17:37:35 | operator-self-healing/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
    logger.go:42: 17:37:35 | operator-self-healing/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
    logger.go:42: 17:37:37 | operator-self-healing/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
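
The CRD bundle is applied with --server-side because a client-side apply would store the whole object in the kubectl.kubernetes.io/last-applied-configuration annotation, which can overflow the 256 KiB annotation limit for CRDs this large; --force-conflicts lets this apply claim fields from any earlier manager. A hedged sketch for inspecting the recorded field ownership afterwards (CRD name taken from the log above):

        kubectl get crd perconaservermysqls.ps.percona.com \
            --show-managed-fields -o yaml | grep -m5 'manager:'
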
    logger.go:42: 17:37:37 | operator-self-healing/0-deploy-operator | + '[' -n ps-operator ']'
    logger.go:42: 17:37:37 | operator-self-healing/0-deploy-operator | + kubectl -n ps-operator apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy/cw-rbac.yaml
    logger.go:42: 17:37:38 | operator-self-healing/0-deploy-operator | serviceaccount/percona-server-mysql-operator created
    logger.go:42: 17:37:38 | operator-self-healing/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
    logger.go:42: 17:37:38 | operator-self-healing/0-deploy-operator | clusterrole.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
    logger.go:42: 17:37:38 | operator-self-healing/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
    logger.go:42: 17:37:38 | operator-self-healing/0-deploy-operator | clusterrolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator unchanged
    logger.go:42: 17:37:38 | operator-self-healing/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"'
    logger.go:42: 17:37:38 | operator-self-healing/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"'
    logger.go:42: 17:37:38 | operator-self-healing/0-deploy-operator | + kubectl -n ps-operator apply -f -
    logger.go:42: 17:37:38 | operator-self-healing/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:37:38 | operator-self-healing/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-825-808887c6"' /mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy/cw-operator.yaml
    logger.go:42: 17:37:40 | operator-self-healing/0-deploy-operator | configmap/percona-server-mysql-operator-config created
    logger.go:42: 17:37:40 | operator-self-healing/0-deploy-operator | deployment.apps/percona-server-mysql-operator created
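
deploy/cw-operator.yaml is a multi-document manifest, and documentIndex==1 selects the Deployment, so the yq calls above rewrite only the manager container's image and env vars. A minimal sketch collapsing the same edits into a single yq invocation (same selectors as the trace):

        yq eval '
          select(documentIndex==1).spec.template.spec.containers[0].image = "perconalab/percona-server-mysql-operator:PR-825-808887c6" |
          (select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true" |
          (select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"
        ' deploy/cw-operator.yaml | kubectl -n ps-operator apply -f -
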
    logger.go:42: 17:37:40 | operator-self-healing/0-deploy-operator | + deploy_non_tls_cluster_secrets
    logger.go:42: 17:37:40 | operator-self-healing/0-deploy-operator | + kubectl -n kuttl-test-loving-spider apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf/secrets.yaml
    logger.go:42: 17:37:41 | operator-self-healing/0-deploy-operator | secret/test-secrets created
    logger.go:42: 17:37:41 | operator-self-healing/0-deploy-operator | + deploy_tls_cluster_secrets
    logger.go:42: 17:37:41 | operator-self-healing/0-deploy-operator | + kubectl -n kuttl-test-loving-spider apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf/ssl-secret.yaml
    logger.go:42: 17:37:42 | operator-self-healing/0-deploy-operator | secret/test-ssl created
    logger.go:42: 17:37:42 | operator-self-healing/0-deploy-operator | + deploy_client
    logger.go:42: 17:37:42 | operator-self-healing/0-deploy-operator | + kubectl -n kuttl-test-loving-spider apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf/client.yaml
    logger.go:42: 17:37:43 | operator-self-healing/0-deploy-operator | pod/mysql-client created
    logger.go:42: 17:37:43 | operator-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:37:43 | operator-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:37:44 | operator-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:37:45 | operator-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:37:45 | operator-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:37:45 | operator-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:37:47 | operator-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:37:47 | operator-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:37:47 | operator-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:37:48 | operator-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:37:48 | operator-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:37:49 | operator-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:37:50 | operator-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:37:50 | operator-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:37:51 | operator-self-healing/0-deploy-operator | INFO   Found 1 resource(s).
    logger.go:42: 17:37:51 | operator-self-healing/0-deploy-operator | NAME                            NAMESPACE     COL0
    logger.go:42: 17:37:51 | operator-self-healing/0-deploy-operator | percona-server-mysql-operator   ps-operator   1
    logger.go:42: 17:37:51 | operator-self-healing/0-deploy-operator | ASSERT PASS
    logger.go:42: 17:37:51 | operator-self-healing/0-deploy-operator | test step completed 0-deploy-operator
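
The readiness gate above comes from the kubectl-assert plugin, polled by kuttl until the deployment reports one ready replica. A roughly equivalent check with stock kubectl (jsonpath waits need v1.23+):

        kubectl -n ps-operator wait deployment/percona-server-mysql-operator \
            --for=jsonpath='{.status.readyReplicas}'=1 --timeout=120s
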
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | starting test step 1-deploy-chaos-mesh
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | running command: [sh -c set -o errexit
        set -o xtrace
        
        source ../../functions
        
        deploy_chaos_mesh]
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | + source ../../functions
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ realpath ../../..
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | ++++ pwd
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/tests/operator-self-healing
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | ++ test_name=operator-self-healing
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/vars.sh
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ export GIT_BRANCH=PR-825
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ GIT_BRANCH=PR-825
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ export VERSION=PR-825-808887c6
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ VERSION=PR-825-808887c6
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ export PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ export CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | ++++ which gdate
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-825/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | ++++ which date
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ date=/usr/bin/date
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ oc get projects
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ :
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ kubectl get nodes
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | +++ grep '^minikube'
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | + deploy_chaos_mesh
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | + destroy_chaos_mesh
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | ++ helm list --all-namespaces --filter chaos-mesh
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | ++ tail -n1
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | ++ sed s/NAMESPACE//
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '-F ' '{print $2}'
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-825/kubeconfig
    logger.go:42: 17:37:51 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-825/kubeconfig
    logger.go:42: 17:37:52 | operator-self-healing/1-deploy-chaos-mesh | + local chaos_mesh_ns=
    logger.go:42: 17:37:52 | operator-self-healing/1-deploy-chaos-mesh | + '[' -n '' ']'
    logger.go:42: 17:37:52 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get MutatingWebhookConfiguration
    logger.go:42: 17:37:52 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
    logger.go:42: 17:37:52 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
    logger.go:42: 17:37:52 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete MutatingWebhookConfiguration
    logger.go:42: 17:37:52 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
    logger.go:42: 17:37:52 | operator-self-healing/1-deploy-chaos-mesh | + :
    logger.go:42: 17:37:52 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration
    logger.go:42: 17:37:52 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
    logger.go:42: 17:37:52 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
    logger.go:42: 17:37:53 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration
    logger.go:42: 17:37:53 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
    logger.go:42: 17:37:53 | operator-self-healing/1-deploy-chaos-mesh | + :
    logger.go:42: 17:37:53 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration
    logger.go:42: 17:37:53 | operator-self-healing/1-deploy-chaos-mesh | ++ grep validate-auth
    logger.go:42: 17:37:53 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
    logger.go:42: 17:37:53 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration
    logger.go:42: 17:37:53 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
    logger.go:42: 17:37:53 | operator-self-healing/1-deploy-chaos-mesh | + :
    logger.go:42: 17:37:53 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl api-resources
    logger.go:42: 17:37:53 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
    logger.go:42: 17:37:53 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
    logger.go:42: 17:37:54 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get crd
    logger.go:42: 17:37:54 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh.org
    logger.go:42: 17:37:54 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
    logger.go:42: 17:37:54 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete crd
    logger.go:42: 17:37:54 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
    logger.go:42: 17:37:54 | operator-self-healing/1-deploy-chaos-mesh | + :
    logger.go:42: 17:37:54 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get clusterrolebinding
    logger.go:42: 17:37:54 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
    logger.go:42: 17:37:54 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
    logger.go:42: 17:37:55 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete clusterrolebinding
    logger.go:42: 17:37:55 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
    logger.go:42: 17:37:55 | operator-self-healing/1-deploy-chaos-mesh | + :
    logger.go:42: 17:37:55 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get clusterrole
    logger.go:42: 17:37:55 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
    logger.go:42: 17:37:55 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
    logger.go:42: 17:37:56 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete clusterrole
    logger.go:42: 17:37:56 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
    logger.go:42: 17:37:56 | operator-self-healing/1-deploy-chaos-mesh | + :
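
The repeated "error: resource(s) were provided, but no name was specified" lines are expected on a clean cluster: each kubectl get | grep | awk pipeline found no chaos-mesh leftovers, so kubectl delete ran with an empty name list and the trailing ":" (the "+ :" lines) swallowed the non-zero exit. A hedged sketch of a quieter guard for one of these cleanups:

        # skip the delete entirely when no chaos-mesh CRDs exist
        crds=$(kubectl get crd | awk '/chaos-mesh.org/{print $1}')
        if [ -n "$crds" ]; then
            timeout 30 kubectl delete crd $crds
        fi
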
    logger.go:42: 17:37:56 | operator-self-healing/1-deploy-chaos-mesh | + helm repo add chaos-mesh https://charts.chaos-mesh.org
    logger.go:42: 17:37:56 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-825/kubeconfig
    logger.go:42: 17:37:56 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-825/kubeconfig
    logger.go:42: 17:37:56 | operator-self-healing/1-deploy-chaos-mesh | "chaos-mesh" has been added to your repositories
    logger.go:42: 17:37:56 | operator-self-healing/1-deploy-chaos-mesh | + '[' -n '' ']'
    logger.go:42: 17:37:56 | operator-self-healing/1-deploy-chaos-mesh | + helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=kuttl-test-loving-spider --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.5.1
    logger.go:42: 17:37:56 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-825/kubeconfig
    logger.go:42: 17:37:56 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-825/kubeconfig
    logger.go:42: 17:38:23 | operator-self-healing/1-deploy-chaos-mesh | NAME: chaos-mesh
    logger.go:42: 17:38:23 | operator-self-healing/1-deploy-chaos-mesh | LAST DEPLOYED: Tue Mar 11 17:38:09 2025
    logger.go:42: 17:38:23 | operator-self-healing/1-deploy-chaos-mesh | NAMESPACE: kuttl-test-loving-spider
    logger.go:42: 17:38:23 | operator-self-healing/1-deploy-chaos-mesh | STATUS: deployed
    logger.go:42: 17:38:23 | operator-self-healing/1-deploy-chaos-mesh | REVISION: 1
    logger.go:42: 17:38:23 | operator-self-healing/1-deploy-chaos-mesh | TEST SUITE: None
    logger.go:42: 17:38:23 | operator-self-healing/1-deploy-chaos-mesh | NOTES:
    logger.go:42: 17:38:23 | operator-self-healing/1-deploy-chaos-mesh | 1. Make sure chaos-mesh components are running
    logger.go:42: 17:38:23 | operator-self-healing/1-deploy-chaos-mesh |    kubectl get pods --namespace kuttl-test-loving-spider -l app.kubernetes.io/instance=chaos-mesh
    logger.go:42: 17:38:23 | operator-self-healing/1-deploy-chaos-mesh | + sleep 10
    logger.go:42: 17:38:34 | operator-self-healing/1-deploy-chaos-mesh | test step completed 1-deploy-chaos-mesh
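
deploy_chaos_mesh pins chart version 2.5.1 and points chaosDaemon at the containerd socket; those two values have to match the cluster's actual container runtime or the daemon cannot reach it to inject faults. A quick hedged verification after install, echoing the chart NOTES:

        kubectl get pods -n kuttl-test-loving-spider \
            -l app.kubernetes.io/instance=chaos-mesh
        kubectl api-resources --api-group=chaos-mesh.org
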
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | starting test step 2-create-cluster
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | running command: [sh -c set -o errexit
        set -o xtrace
        
        source ../../functions
        
        get_cr \
            | yq eval '.spec.mysql.clusterType="async"' - \
            | yq eval '.spec.mysql.size=3' - \
            | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \
            | yq eval '.spec.proxy.haproxy.enabled=true' - \
            | yq eval '.spec.proxy.haproxy.size=3' - \
            | yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - \
            | yq eval '.spec.orchestrator.enabled=true' - \
            | yq eval '.spec.orchestrator.size=3' - \
            | yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \
            | kubectl -n "${NAMESPACE}" apply -f -]
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + source ../../functions
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ realpath ../../..
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | ++++ pwd
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/tests/operator-self-healing
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | ++ test_name=operator-self-healing
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/vars.sh
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ export GIT_BRANCH=PR-825
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ GIT_BRANCH=PR-825
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ export VERSION=PR-825-808887c6
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ VERSION=PR-825-808887c6
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ export PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ export CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | ++++ which gdate
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-825/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | ++++ which date
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ date=/usr/bin/date
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ oc get projects
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ :
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ kubectl get nodes
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | +++ grep '^minikube'
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + get_cr
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + local name_suffix=
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval .spec.mysql.size=3 -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval .spec.proxy.haproxy.size=3 -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | ++ printf '.metadata.name="%s"' operator-self-healing
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval '.metadata.name="operator-self-healing"' /mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy/cr.yaml
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval .spec.orchestrator.enabled=true -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + '[' -n '' ']'
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval .spec.orchestrator.size=3 -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + kubectl -n kuttl-test-loving-spider apply -f -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval .spec.proxy.haproxy.enabled=true -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval '.spec.secretsName="test-secrets"' -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-825-808887c6"' -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:3-dev-latest"' -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' -
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:38:34 | operator-self-healing/2-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
    logger.go:42: 17:38:35 | operator-self-healing/2-create-cluster | perconaservermysql.ps.percona.com/operator-self-healing created
    logger.go:42: 17:41:51 | operator-self-healing/2-create-cluster | test step completed 2-create-cluster
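
get_cr builds the custom resource by piping deploy/cr.yaml through one yq edit per field, as the interleaved xtrace lines show. A consolidated sketch of the step-specific overrides (omitting the image and secret defaults also visible above):

        yq eval '
          .metadata.name = "operator-self-healing" |
          .spec.mysql.clusterType = "async" |
          .spec.mysql.size = 3 |
          .spec.mysql.affinity.antiAffinityTopologyKey = "none" |
          .spec.proxy.haproxy.enabled = true |
          .spec.proxy.haproxy.size = 3 |
          .spec.proxy.haproxy.affinity.antiAffinityTopologyKey = "none" |
          .spec.orchestrator.enabled = true |
          .spec.orchestrator.size = 3 |
          .spec.orchestrator.affinity.antiAffinityTopologyKey = "none"
        ' deploy/cr.yaml | kubectl -n kuttl-test-loving-spider apply -f -
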
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | starting test step 3-write-data
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | running command: [sh -c set -o errexit
        set -o xtrace
        
        source ../../functions
        
        run_mysql \
            "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" \
            "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password"
        
        run_mysql \
            "INSERT myDB.myTable (id) VALUES (100500)" \
            "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password"
        sleep 5]
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | + source ../../functions
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ realpath ../../..
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | ++++ pwd
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/tests/operator-self-healing
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | ++ test_name=operator-self-healing
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/vars.sh
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ export GIT_BRANCH=PR-825
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ GIT_BRANCH=PR-825
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ export VERSION=PR-825-808887c6
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ VERSION=PR-825-808887c6
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ export PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ export CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | ++++ which gdate
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-825/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | ++++ which date
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ date=/usr/bin/date
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ oc get projects
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ :
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ kubectl get nodes
    logger.go:42: 17:41:51 | operator-self-healing/3-write-data | +++ grep '^minikube'
    logger.go:42: 17:41:52 | operator-self-healing/3-write-data | +++ get_cluster_name
    logger.go:42: 17:41:52 | operator-self-healing/3-write-data | +++ kubectl -n kuttl-test-loving-spider get ps -o 'jsonpath={.items[0].metadata.name}'
    logger.go:42: 17:41:52 | operator-self-healing/3-write-data | ++ get_haproxy_svc operator-self-healing
    logger.go:42: 17:41:52 | operator-self-healing/3-write-data | ++ local cluster=operator-self-healing
    logger.go:42: 17:41:52 | operator-self-healing/3-write-data | ++ echo operator-self-healing-haproxy
    logger.go:42: 17:41:52 | operator-self-healing/3-write-data | + run_mysql 'CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' '-h operator-self-healing-haproxy -uroot -proot_password'
    logger.go:42: 17:41:52 | operator-self-healing/3-write-data | + local 'command=CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)'
    logger.go:42: 17:41:52 | operator-self-healing/3-write-data | + local 'uri=-h operator-self-healing-haproxy -uroot -proot_password'
    logger.go:42: 17:41:52 | operator-self-healing/3-write-data | + local pod=
    logger.go:42: 17:41:52 | operator-self-healing/3-write-data | ++ get_client_pod
    logger.go:42: 17:41:52 | operator-self-healing/3-write-data | ++ kubectl -n kuttl-test-loving-spider get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
    logger.go:42: 17:41:52 | operator-self-healing/3-write-data | + client_pod=mysql-client
    logger.go:42: 17:41:52 | operator-self-healing/3-write-data | + wait_pod mysql-client
    logger.go:42: 17:41:52 | operator-self-healing/3-write-data | + local pod=mysql-client
    logger.go:42: 17:41:52 | operator-self-healing/3-write-data | + set +o xtrace
    logger.go:42: 17:41:53 | operator-self-healing/3-write-data | mysql-clienttrue
    logger.go:42: 17:41:53 | operator-self-healing/3-write-data | + kubectl -n kuttl-test-loving-spider exec mysql-client -- bash -c 'printf '\''%s\n'\'' "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" | mysql -sN -h operator-self-healing-haproxy -uroot -proot_password'
    logger.go:42: 17:41:53 | operator-self-healing/3-write-data | + sed -e 's/mysql: //'
    logger.go:42: 17:41:53 | operator-self-healing/3-write-data | + grep -v 'Using a password on the command line interface can be insecure.'
    logger.go:42: 17:41:54 | operator-self-healing/3-write-data | + :
    logger.go:42: 17:41:54 | operator-self-healing/3-write-data | +++ get_cluster_name
    logger.go:42: 17:41:54 | operator-self-healing/3-write-data | +++ kubectl -n kuttl-test-loving-spider get ps -o 'jsonpath={.items[0].metadata.name}'
    logger.go:42: 17:41:55 | operator-self-healing/3-write-data | ++ get_haproxy_svc operator-self-healing
    logger.go:42: 17:41:55 | operator-self-healing/3-write-data | ++ local cluster=operator-self-healing
    logger.go:42: 17:41:55 | operator-self-healing/3-write-data | ++ echo operator-self-healing-haproxy
    logger.go:42: 17:41:55 | operator-self-healing/3-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100500)' '-h operator-self-healing-haproxy -uroot -proot_password'
    logger.go:42: 17:41:55 | operator-self-healing/3-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100500)'
    logger.go:42: 17:41:55 | operator-self-healing/3-write-data | + local 'uri=-h operator-self-healing-haproxy -uroot -proot_password'
    logger.go:42: 17:41:55 | operator-self-healing/3-write-data | + local pod=
    logger.go:42: 17:41:55 | operator-self-healing/3-write-data | ++ get_client_pod
    logger.go:42: 17:41:55 | operator-self-healing/3-write-data | ++ kubectl -n kuttl-test-loving-spider get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
    logger.go:42: 17:41:55 | operator-self-healing/3-write-data | + client_pod=mysql-client
    logger.go:42: 17:41:55 | operator-self-healing/3-write-data | + wait_pod mysql-client
    logger.go:42: 17:41:55 | operator-self-healing/3-write-data | + local pod=mysql-client
    logger.go:42: 17:41:55 | operator-self-healing/3-write-data | + set +o xtrace
    logger.go:42: 17:41:55 | operator-self-healing/3-write-data | mysql-client true
    logger.go:42: 17:41:55 | operator-self-healing/3-write-data | + kubectl -n kuttl-test-loving-spider exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100500)" | mysql -sN -h operator-self-healing-haproxy -uroot -proot_password'
    logger.go:42: 17:41:55 | operator-self-healing/3-write-data | + sed -e 's/mysql: //'
    logger.go:42: 17:41:55 | operator-self-healing/3-write-data | + grep -v 'Using a password on the command line interface can be insecure.'
    logger.go:42: 17:41:57 | operator-self-healing/3-write-data | + :
    logger.go:42: 17:41:57 | operator-self-healing/3-write-data | + sleep 5
    logger.go:42: 17:42:02 | operator-self-healing/3-write-data | test step completed 3-write-data
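    [editor's note] Steps 3 and 4 both go through the shared run_mysql helper whose xtrace output appears above. A minimal sketch of that helper, reconstructed from the trace (the canonical version lives in e2e-tests/functions and its exact quoting may differ):

        run_mysql() {
            local command="$1"   # SQL to execute
            local uri="$2"       # e.g. "-h <haproxy-svc> -uroot -proot_password"
            local pod
            # the client pod is looked up by its name=mysql-client label
            pod=$(kubectl -n "${NAMESPACE}" get pods --selector=name=mysql-client \
                -o 'jsonpath={.items[].metadata.name}')
            kubectl -n "${NAMESPACE}" exec "${pod}" -- bash -c \
                "printf '%s\n' \"${command}\" | mysql -sN ${uri}" \
                | sed -e 's/mysql: //' \
                | grep -v 'Using a password on the command line interface can be insecure.'
        }

    The lone '+ :' lines after each call are consistent with an '|| :' guard that keeps set -o errexit from aborting the step when grep -v filters out all output.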
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | starting test step 4-read-from-primary
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | running command: [sh -c set -o errexit
        set -o xtrace
        
        source ../../functions
        
        data=$(run_mysql "SELECT * FROM myDB.myTable" "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password")
        
        kubectl create configmap -n "${NAMESPACE}" 04-read-from-primary --from-literal=data="${data}"]
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | + source ../../functions
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ realpath ../../..
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | ++++ pwd
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/tests/operator-self-healing
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | ++ test_name=operator-self-healing
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/vars.sh
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ export GIT_BRANCH=PR-825
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ GIT_BRANCH=PR-825
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ export VERSION=PR-825-808887c6
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ VERSION=PR-825-808887c6
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ export PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ export CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | ++++ which gdate
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-825/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | ++++ which date
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ date=/usr/bin/date
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ oc get projects
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ :
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ kubectl get nodes
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | +++ grep '^minikube'
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | ++++ get_cluster_name
    logger.go:42: 17:42:02 | operator-self-healing/4-read-from-primary | ++++ kubectl -n kuttl-test-loving-spider get ps -o 'jsonpath={.items[0].metadata.name}'
    logger.go:42: 17:42:03 | operator-self-healing/4-read-from-primary | +++ get_haproxy_svc operator-self-healing
    logger.go:42: 17:42:03 | operator-self-healing/4-read-from-primary | +++ local cluster=operator-self-healing
    logger.go:42: 17:42:03 | operator-self-healing/4-read-from-primary | +++ echo operator-self-healing-haproxy
    logger.go:42: 17:42:03 | operator-self-healing/4-read-from-primary | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h operator-self-healing-haproxy -uroot -proot_password'
    logger.go:42: 17:42:03 | operator-self-healing/4-read-from-primary | ++ local 'command=SELECT * FROM myDB.myTable'
    logger.go:42: 17:42:03 | operator-self-healing/4-read-from-primary | ++ local 'uri=-h operator-self-healing-haproxy -uroot -proot_password'
    logger.go:42: 17:42:03 | operator-self-healing/4-read-from-primary | ++ local pod=
    logger.go:42: 17:42:03 | operator-self-healing/4-read-from-primary | +++ get_client_pod
    logger.go:42: 17:42:03 | operator-self-healing/4-read-from-primary | +++ kubectl -n kuttl-test-loving-spider get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
    logger.go:42: 17:42:03 | operator-self-healing/4-read-from-primary | ++ client_pod=mysql-client
    logger.go:42: 17:42:03 | operator-self-healing/4-read-from-primary | ++ wait_pod mysql-client
    logger.go:42: 17:42:03 | operator-self-healing/4-read-from-primary | ++ local pod=mysql-client
    logger.go:42: 17:42:03 | operator-self-healing/4-read-from-primary | ++ set +o xtrace
    logger.go:42: 17:42:04 | operator-self-healing/4-read-from-primary | mysql-client true
    logger.go:42: 17:42:04 | operator-self-healing/4-read-from-primary | ++ kubectl -n kuttl-test-loving-spider exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h operator-self-healing-haproxy -uroot -proot_password'
    logger.go:42: 17:42:04 | operator-self-healing/4-read-from-primary | ++ sed -e 's/mysql: //'
    logger.go:42: 17:42:04 | operator-self-healing/4-read-from-primary | ++ grep -v 'Using a password on the command line interface can be insecure.'
    logger.go:42: 17:42:05 | operator-self-healing/4-read-from-primary | + data=100500
    logger.go:42: 17:42:05 | operator-self-healing/4-read-from-primary | + kubectl create configmap -n kuttl-test-loving-spider 04-read-from-primary --from-literal=data=100500
    logger.go:42: 17:42:05 | operator-self-healing/4-read-from-primary | configmap/04-read-from-primary created
    logger.go:42: 17:42:06 | operator-self-healing/4-read-from-primary | test step completed 4-read-from-primary
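    [editor's note] The read-back pattern here is worth calling out: the value inserted in step 3 (100500) is selected back through the HAProxy service and pinned into a ConfigMap, so the step's kuttl assert file can check it declaratively. The stored value can also be inspected by hand; a sketch, using NAMESPACE as the step script does:

        kubectl -n "${NAMESPACE}" get configmap 04-read-from-primary \
            -o 'jsonpath={.data.data}'   # expected output: 100500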
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | starting test step 5-kill-pod
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | running command: [sh -c set -o errexit
        set -o xtrace
        
        source ../../functions
        
        init_pod=$(get_operator_pod)
        kill_pods "${OPERATOR_NS:-$NAMESPACE}" "pod" "$init_pod" "" "operator"
        sleep 10 # wait a bit for pod to be killed
        wait_deployment percona-server-mysql-operator "${OPERATOR_NS:-$NAMESPACE}"
        
        if [ "$init_pod" == "$(get_operator_pod)" ]; then
            echo "operator pod was not killed! something went wrong."
            exit 1
        fi]
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | + source ../../functions
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ realpath ../../..
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | ++++ pwd
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/tests/operator-self-healing
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | ++ test_name=operator-self-healing
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/vars.sh
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ export GIT_BRANCH=PR-825
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ GIT_BRANCH=PR-825
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ export VERSION=PR-825-808887c6
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ VERSION=PR-825-808887c6
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ export PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ export CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | ++++ which gdate
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-825/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | ++++ which date
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ date=/usr/bin/date
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ oc get projects
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ :
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ kubectl get nodes
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | +++ grep '^minikube'
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | ++ get_operator_pod
    logger.go:42: 17:42:06 | operator-self-healing/5-kill-pod | ++ kubectl get pods -n ps-operator --selector=app.kubernetes.io/name=percona-server-mysql-operator -o 'jsonpath={.items[].metadata.name}'
    logger.go:42: 17:42:07 | operator-self-healing/5-kill-pod | + init_pod=percona-server-mysql-operator-7959bb8cdc-42ncg
    logger.go:42: 17:42:07 | operator-self-healing/5-kill-pod | + kill_pods ps-operator pod percona-server-mysql-operator-7959bb8cdc-42ncg '' operator
    logger.go:42: 17:42:07 | operator-self-healing/5-kill-pod | + local ns=ps-operator
    logger.go:42: 17:42:07 | operator-self-healing/5-kill-pod | + local selector=pod
    logger.go:42: 17:42:07 | operator-self-healing/5-kill-pod | + local pod_label=percona-server-mysql-operator-7959bb8cdc-42ncg
    logger.go:42: 17:42:07 | operator-self-healing/5-kill-pod | + local label_value=
    logger.go:42: 17:42:07 | operator-self-healing/5-kill-pod | + local chaos_suffix=operator
    logger.go:42: 17:42:07 | operator-self-healing/5-kill-pod | + '[' pod == pod ']'
    logger.go:42: 17:42:07 | operator-self-healing/5-kill-pod | + yq eval '
    logger.go:42: 17:42:07 | operator-self-healing/5-kill-pod | 			.metadata.name = "chaos-pod-kill-operator" |
    logger.go:42: 17:42:07 | operator-self-healing/5-kill-pod | 			del(.spec.selector.pods.test-namespace) |
    logger.go:42: 17:42:07 | operator-self-healing/5-kill-pod | 			.spec.selector.pods.ps-operator[0] = "percona-server-mysql-operator-7959bb8cdc-42ncg"' /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf/chaos-pod-kill.yml
    logger.go:42: 17:42:07 | operator-self-healing/5-kill-pod | + kubectl apply --namespace ps-operator -f -
    logger.go:42: 17:42:09 | operator-self-healing/5-kill-pod | podchaos.chaos-mesh.org/chaos-pod-kill-operator created
    logger.go:42: 17:42:09 | operator-self-healing/5-kill-pod | + sleep 5
    logger.go:42: 17:42:14 | operator-self-healing/5-kill-pod | + sleep 10
    logger.go:42: 17:42:24 | operator-self-healing/5-kill-pod | + wait_deployment percona-server-mysql-operator ps-operator
    logger.go:42: 17:42:24 | operator-self-healing/5-kill-pod | + local name=percona-server-mysql-operator
    logger.go:42: 17:42:24 | operator-self-healing/5-kill-pod | + local target_namespace=ps-operator
    logger.go:42: 17:42:24 | operator-self-healing/5-kill-pod | + sleep 10
    logger.go:42: 17:42:34 | operator-self-healing/5-kill-pod | + set +o xtrace
    logger.go:42: 17:42:35 | operator-self-healing/5-kill-pod | percona-server-mysql-operator
    logger.go:42: 17:42:35 | operator-self-healing/5-kill-pod | ++ get_operator_pod
    logger.go:42: 17:42:35 | operator-self-healing/5-kill-pod | ++ kubectl get pods -n ps-operator --selector=app.kubernetes.io/name=percona-server-mysql-operator -o 'jsonpath={.items[].metadata.name}'
    logger.go:42: 17:42:35 | operator-self-healing/5-kill-pod | + '[' percona-server-mysql-operator-7959bb8cdc-42ncg == percona-server-mysql-operator-7959bb8cdc-t454f ']'
    logger.go:42: 17:42:36 | operator-self-healing/5-kill-pod | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:42:36 | operator-self-healing/5-kill-pod | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:42:36 | operator-self-healing/5-kill-pod | INFO   Found 1 resource(s).
    logger.go:42: 17:42:36 | operator-self-healing/5-kill-pod | NAME                            NAMESPACE     COL0
    logger.go:42: 17:42:36 | operator-self-healing/5-kill-pod | percona-server-mysql-operator   ps-operator   1
    logger.go:42: 17:42:36 | operator-self-healing/5-kill-pod | ASSERT PASS
    logger.go:42: 17:42:36 | operator-self-healing/5-kill-pod | test step completed 5-kill-pod
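    [editor's note] For context, kill_pods with a pod selector renders a chaos-mesh PodChaos object from e2e-tests/conf/chaos-pod-kill.yml, as the yq expressions in the trace show. A hedged sketch of the applied object (metadata.name and the selector come from the trace; action and mode are assumptions about the template, which the log does not print):

        cat <<EOF | kubectl apply --namespace ps-operator -f -
        apiVersion: chaos-mesh.org/v1alpha1
        kind: PodChaos
        metadata:
          name: chaos-pod-kill-operator
        spec:
          action: pod-kill        # assumed: one-shot kill of the selected pod
          mode: one
          selector:
            pods:
              ps-operator:        # namespace -> list of pod names
                - percona-server-mysql-operator-7959bb8cdc-42ncg
        EOF

    Self-healing is then verified two ways: the pod-name comparison (7959bb8cdc-42ncg against the replacement 7959bb8cdc-t454f) and the kubectl assert on status.readyReplicas=1.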
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | starting test step 6-scale-up
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | running command: [sh -c set -o errexit
        set -o xtrace
        
        source ../../functions
        
        get_cr \
            | yq eval '.spec.mysql.clusterType="async"' - \
            | yq eval '.spec.mysql.size=3' - \
            | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \
            | yq eval '.spec.proxy.haproxy.enabled=true' - \
            | yq eval '.spec.proxy.haproxy.size=5' - \
            | yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - \
            | yq eval '.spec.orchestrator.enabled=true' - \
            | yq eval '.spec.orchestrator.size=3' - \
            | yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \
            | kubectl -n "${NAMESPACE}" apply -f -]
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + source ../../functions
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ realpath ../../..
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | ++++ pwd
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/tests/operator-self-healing
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | ++ test_name=operator-self-healing
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/vars.sh
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ export GIT_BRANCH=PR-825
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ GIT_BRANCH=PR-825
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ export VERSION=PR-825-808887c6
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ VERSION=PR-825-808887c6
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ export PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ export CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | ++++ which gdate
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-825/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | ++++ which date
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ date=/usr/bin/date
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ oc get projects
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ :
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ kubectl get nodes
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | +++ grep '^minikube'
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + get_cr
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + local name_suffix=
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval '.spec.mysql.clusterType="async"' -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | ++ printf '.metadata.name="%s"' operator-self-healing
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval '.metadata.name="operator-self-healing"' /mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy/cr.yaml
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval .spec.proxy.haproxy.enabled=true -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval .spec.orchestrator.size=3 -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval .spec.proxy.haproxy.size=5 -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + kubectl -n kuttl-test-loving-spider apply -f -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval .spec.orchestrator.enabled=true -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval .spec.mysql.size=3 -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval '.spec.pmm.image="perconalab/pmm-client:3-dev-latest"' -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval '.spec.upgradeOptions.apply="disabled"' -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval '.spec.mysql.clusterType="async"' -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval '.spec.sslSecretName="test-ssl"' -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval '.spec.secretsName="test-secrets"' -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-825-808887c6"' -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + '[' -n '' ']'
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' -
    logger.go:42: 17:42:36 | operator-self-healing/6-scale-up | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
    logger.go:42: 17:42:38 | operator-self-healing/6-scale-up | perconaservermysql.ps.percona.com/operator-self-healing configured
    logger.go:42: 17:42:48 | operator-self-healing/6-scale-up | test step completed 6-scale-up
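    [editor's note] The interleaved '+'/'++' lines above are a single pipeline whose stages all start at once under xtrace, so their echoes appear out of order. Reassembled, the scale-up is a plain declarative CR update; a condensed sketch keeping only the overrides specific to this step (get_cr also injects the image and secret settings visible in the trace):

        yq eval '.metadata.name="operator-self-healing"' "${DEPLOY_DIR}/cr.yaml" \
            | yq eval '.spec.mysql.clusterType="async"' - \
            | yq eval '.spec.mysql.size=3' - \
            | yq eval '.spec.proxy.haproxy.enabled=true' - \
            | yq eval '.spec.proxy.haproxy.size=5' - \
            | yq eval '.spec.orchestrator.enabled=true' - \
            | yq eval '.spec.orchestrator.size=3' - \
            | kubectl -n "${NAMESPACE}" apply -f -

    Acting on this update relies on the operator pod that was just rescheduled in step 5 reconciling the changed CR, which is exactly what the surrounding chaos steps are exercising.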
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | starting test step 7-network-loss
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | running command: [sh -c set -o errexit
        set -o xtrace
        
        source ../../functions
        
        network_loss "${OPERATOR_NS:-$NAMESPACE}" "$(get_operator_pod)" "operator"
        sleep 30 # wait for network loss to happen]
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | + source ../../functions
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ realpath ../../..
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | ++++ pwd
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/tests/operator-self-healing
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | ++ test_name=operator-self-healing
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/vars.sh
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ export GIT_BRANCH=PR-825
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ GIT_BRANCH=PR-825
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ export VERSION=PR-825-808887c6
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ VERSION=PR-825-808887c6
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ export PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ export CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | ++++ which gdate
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-825/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | ++++ which date
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ date=/usr/bin/date
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ oc get projects
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ :
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ kubectl get nodes
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | +++ grep '^minikube'
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | ++ get_operator_pod
    logger.go:42: 17:42:48 | operator-self-healing/7-network-loss | ++ kubectl get pods -n ps-operator --selector=app.kubernetes.io/name=percona-server-mysql-operator -o 'jsonpath={.items[].metadata.name}'
    logger.go:42: 17:42:49 | operator-self-healing/7-network-loss | + network_loss ps-operator percona-server-mysql-operator-7959bb8cdc-t454f operator
    logger.go:42: 17:42:49 | operator-self-healing/7-network-loss | + local ns=ps-operator
    logger.go:42: 17:42:49 | operator-self-healing/7-network-loss | + local pod=percona-server-mysql-operator-7959bb8cdc-t454f
    logger.go:42: 17:42:49 | operator-self-healing/7-network-loss | + local chaos_suffix=operator
    logger.go:42: 17:42:49 | operator-self-healing/7-network-loss | + kubectl apply --namespace ps-operator -f -
    logger.go:42: 17:42:49 | operator-self-healing/7-network-loss | + yq eval '
    logger.go:42: 17:42:49 | operator-self-healing/7-network-loss |         .metadata.name = "chaos-pod-network-loss-operator" |
    logger.go:42: 17:42:49 | operator-self-healing/7-network-loss |         del(.spec.selector.pods.test-namespace) |
    logger.go:42: 17:42:49 | operator-self-healing/7-network-loss |         .spec.selector.pods.ps-operator[0] = "percona-server-mysql-operator-7959bb8cdc-t454f"' /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf/chaos-network-loss.yml
    logger.go:42: 17:42:50 | operator-self-healing/7-network-loss | networkchaos.chaos-mesh.org/chaos-pod-network-loss-operator created
    logger.go:42: 17:42:50 | operator-self-healing/7-network-loss | + sleep 5
    logger.go:42: 17:42:55 | operator-self-healing/7-network-loss | + sleep 30
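    [editor's note] network_loss follows the same template-rendering pattern as kill_pods, this time with a NetworkChaos object from e2e-tests/conf/chaos-network-loss.yml. A sketch of the applied object (name and selector are from the trace; the action, loss percentage, and duration are assumptions about the template, which the log does not print):

        cat <<EOF | kubectl apply --namespace ps-operator -f -
        apiVersion: chaos-mesh.org/v1alpha1
        kind: NetworkChaos
        metadata:
          name: chaos-pod-network-loss-operator
        spec:
          action: loss
          mode: one
          selector:
            pods:
              ps-operator:
                - percona-server-mysql-operator-7959bb8cdc-t454f
          loss:
            loss: "100"          # assumed: full packet loss to isolate the pod
            correlation: "100"
          duration: 60s          # assumed: long enough to outlive the sleep 30
        EOF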
    logger.go:42: 17:43:25 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:43:25 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:43:26 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:43:27 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:43:27 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:43:28 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:43:29 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:43:29 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:43:29 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:43:31 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:43:31 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:43:31 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:43:32 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:43:32 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:43:33 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:43:34 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:43:34 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:43:34 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:43:36 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:43:36 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:43:36 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:43:37 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:43:37 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:43:38 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:43:39 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:43:39 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:43:39 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:43:41 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:43:41 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:43:41 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:43:42 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:43:42 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:43:43 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:43:44 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:43:44 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:43:44 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:43:46 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:43:46 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:43:46 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:43:47 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:43:47 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:43:48 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:43:49 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:43:49 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:43:50 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:43:51 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:43:51 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:43:51 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:43:52 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:43:53 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:43:53 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:43:54 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:43:54 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:43:55 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:43:56 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:43:56 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:43:56 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:43:58 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:43:58 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:43:59 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:44:00 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:44:00 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:44:00 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:44:02 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:44:02 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:44:02 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:44:03 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:44:03 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:44:04 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:44:05 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:44:05 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:44:05 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:44:07 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:44:07 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:44:07 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:44:08 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:44:08 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:44:09 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:44:10 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:44:10 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:44:10 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:44:12 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:44:12 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:44:12 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:44:14 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:44:14 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:44:14 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:44:15 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:44:15 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:44:16 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:44:17 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:44:17 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:44:17 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:44:19 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:44:19 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:44:19 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:44:20 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:44:20 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:44:21 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:44:22 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:44:22 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:44:22 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:44:24 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:44:24 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:44:24 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:44:25 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:44:25 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:44:26 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:44:27 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:44:27 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:44:28 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:44:29 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:44:29 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:44:29 | operator-self-healing/7-network-loss | INFO   Found 1 resource(s).
    logger.go:42: 17:44:29 | operator-self-healing/7-network-loss | NAME                            NAMESPACE     COL0
    logger.go:42: 17:44:29 | operator-self-healing/7-network-loss | percona-server-mysql-operator   ps-operator   1
    logger.go:42: 17:44:29 | operator-self-healing/7-network-loss | ASSERT PASS
    logger.go:42: 17:44:29 | operator-self-healing/7-network-loss | test step completed 7-network-loss
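The verification loop above is the harness's readiness gate: kubectl assert exist-enhanced (from the kubectl-assert krew plugin; note the .krew bin directory on PATH in the traces below) is re-run roughly every two seconds until the operator deployment reports status.readyReplicas=1, which in this run took about 35 seconds after the network-loss chaos ended. In the passing output, COL0 is the value of the asserted field, here readyReplicas. Below is a minimal sketch of an equivalent wait without the plugin, polling readyReplicas directly; the function name and the 180-second budget are illustrative assumptions, not part of the harness:

    # Poll the operator Deployment until it reports one ready replica.
    # wait_operator_ready and the 180s deadline are assumptions for this sketch.
    wait_operator_ready() {
        local ns="${OPERATOR_NS:-$NAMESPACE}"
        local deadline=$((SECONDS + 180))
        until [ "$(kubectl -n "$ns" get deployment percona-server-mysql-operator \
            -o jsonpath='{.status.readyReplicas}')" = "1" ]; do
            if [ "$SECONDS" -ge "$deadline" ]; then
                echo "operator not ready in time" >&2
                return 1
            fi
            sleep 2
        done
    }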
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | starting test step 8-scale-down
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | running command: [sh -c set -o errexit
        set -o xtrace
        
        source ../../functions
        
        get_cr \
            | yq eval '.spec.mysql.clusterType="async"' - \
            | yq eval '.spec.mysql.size=3' - \
            | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \
            | yq eval '.spec.proxy.haproxy.enabled=true' - \
            | yq eval '.spec.proxy.haproxy.size=3' - \
            | yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - \
            | yq eval '.spec.orchestrator.enabled=true' - \
            | yq eval '.spec.orchestrator.size=3' - \
            | yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \
            | kubectl -n "${NAMESPACE}" apply -f -]
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | + source ../../functions
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ realpath ../../..
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | ++++ pwd
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/tests/operator-self-healing
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | ++ test_name=operator-self-healing
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/vars.sh
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ export GIT_BRANCH=PR-825
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ GIT_BRANCH=PR-825
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ export VERSION=PR-825-808887c6
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ VERSION=PR-825-808887c6
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ export PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ export CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | ++++ which gdate
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-825/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | ++++ which date
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ date=/usr/bin/date
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ oc get projects
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ :
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ kubectl get nodes
    logger.go:42: 17:44:29 | operator-self-healing/8-scale-down | +++ grep '^minikube'
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + get_cr
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + local name_suffix=
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval '.spec.mysql.clusterType="async"' -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval .spec.mysql.size=3 -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | ++ printf '.metadata.name="%s"' operator-self-healing
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval '.metadata.name="operator-self-healing"' /mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy/cr.yaml
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval .spec.orchestrator.enabled=true -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval .spec.orchestrator.size=3 -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval '.spec.sslSecretName="test-ssl"' -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval '.spec.upgradeOptions.apply="disabled"' -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + kubectl -n kuttl-test-loving-spider apply -f -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval '.spec.mysql.clusterType="async"' -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval .spec.proxy.haproxy.enabled=true -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval .spec.proxy.haproxy.size=3 -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval '.spec.secretsName="test-secrets"' -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-825-808887c6"' -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + '[' -n '' ']'
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
    logger.go:42: 17:44:30 | operator-self-healing/8-scale-down | + yq eval '.spec.pmm.image="perconalab/pmm-client:3-dev-latest"' -
    logger.go:42: 17:44:31 | operator-self-healing/8-scale-down | perconaservermysql.ps.percona.com/operator-self-healing configured
    logger.go:42: 17:44:46 | operator-self-healing/8-scale-down | test step completed 8-scale-down
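Step 8 re-renders the custom resource rather than patching it: get_cr runs a yq eval per field over deploy/cr.yaml (metadata.name, secretsName, sslSecretName, initImage and the component images), the step then pipes the result through its own yq eval edits, and kubectl apply picks up the changed fields, with spec.proxy.haproxy.size=3 being the scale-down the step is named for. Because every yq eval is a separate stage of one long shell pipeline, each stage's xtrace line is printed when that stage starts, so the '+ yq eval' lines above appear out of order relative to the script listing; the data still flows through the stages in the listed order. A minimal sketch of the same mutation collapsed into one yq expression follows; the field paths are taken from the listing above, and merging them into a single eval is an illustrative simplification, not the harness code:

    # Equivalent single-pass render of the CR applied by this step.
    yq eval '
        .metadata.name = "operator-self-healing" |
        .spec.mysql.clusterType = "async" |
        .spec.mysql.size = 3 |
        .spec.mysql.affinity.antiAffinityTopologyKey = "none" |
        .spec.proxy.haproxy.enabled = true |
        .spec.proxy.haproxy.size = 3 |
        .spec.proxy.haproxy.affinity.antiAffinityTopologyKey = "none" |
        .spec.orchestrator.enabled = true |
        .spec.orchestrator.size = 3 |
        .spec.orchestrator.affinity.antiAffinityTopologyKey = "none"
    ' deploy/cr.yaml | kubectl -n "$NAMESPACE" apply -f -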
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | starting test step 9-pod-failure
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | running command: [sh -c set -o errexit
        set -o xtrace
        
        source ../../functions
        
        failure_pod "${OPERATOR_NS:-$NAMESPACE}" "$(get_operator_pod)" "operator"
        sleep 30 # wait for pod failure to happen]
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | + source ../../functions
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ realpath ../../..
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | ++++ pwd
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/tests/operator-self-healing
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | ++ test_name=operator-self-healing
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/vars.sh
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ export GIT_BRANCH=PR-825
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ GIT_BRANCH=PR-825
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ export VERSION=PR-825-808887c6
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ VERSION=PR-825-808887c6
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ export PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ export CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | ++++ which gdate
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-825/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | ++++ which date
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ date=/usr/bin/date
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ oc get projects
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ :
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ kubectl get nodes
    logger.go:42: 17:44:46 | operator-self-healing/9-pod-failure | +++ grep '^minikube'
    logger.go:42: 17:44:47 | operator-self-healing/9-pod-failure | ++ get_operator_pod
    logger.go:42: 17:44:47 | operator-self-healing/9-pod-failure | ++ kubectl get pods -n ps-operator --selector=app.kubernetes.io/name=percona-server-mysql-operator -o 'jsonpath={.items[].metadata.name}'
    logger.go:42: 17:44:48 | operator-self-healing/9-pod-failure | + failure_pod ps-operator percona-server-mysql-operator-7959bb8cdc-t454f operator
    logger.go:42: 17:44:48 | operator-self-healing/9-pod-failure | + local ns=ps-operator
    logger.go:42: 17:44:48 | operator-self-healing/9-pod-failure | + local pod=percona-server-mysql-operator-7959bb8cdc-t454f
    logger.go:42: 17:44:48 | operator-self-healing/9-pod-failure | + local chaos_suffix=operator
    logger.go:42: 17:44:48 | operator-self-healing/9-pod-failure | + kubectl apply --namespace ps-operator -f -
    logger.go:42: 17:44:48 | operator-self-healing/9-pod-failure | + yq eval '
    logger.go:42: 17:44:48 | operator-self-healing/9-pod-failure |         .metadata.name = "chaos-pod-failure-operator" |
    logger.go:42: 17:44:48 | operator-self-healing/9-pod-failure |         del(.spec.selector.pods.test-namespace) |
    logger.go:42: 17:44:48 | operator-self-healing/9-pod-failure |         .spec.selector.pods.ps-operator[0] = "percona-server-mysql-operator-7959bb8cdc-t454f"' /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf/chaos-pod-failure.yml
    logger.go:42: 17:44:49 | operator-self-healing/9-pod-failure | podchaos.chaos-mesh.org/chaos-pod-failure-operator created
    logger.go:42: 17:44:49 | operator-self-healing/9-pod-failure | + sleep 5
    logger.go:42: 17:44:54 | operator-self-healing/9-pod-failure | + sleep 30
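failure_pod resolves the current operator pod with a label selector, then templates e2e-tests/conf/chaos-pod-failure.yml with yq: it renames the object to chaos-pod-failure-operator, drops the placeholder selector key (test-namespace) and points the selector at the resolved pod before applying it in the ps-operator namespace. A sketch of the PodChaos object this plausibly produces is below; only the name and the pod selector are taken from the trace above, while action, mode and duration are assumptions based on a typical chaos-mesh pod-failure experiment (the roughly one-minute recovery seen below is consistent with a 60s duration):

    # Hypothetical rendering of the applied experiment; action, mode and
    # duration are assumed, the name and selector come from the trace above.
    kubectl apply --namespace ps-operator -f - <<'EOF'
    apiVersion: chaos-mesh.org/v1alpha1
    kind: PodChaos
    metadata:
      name: chaos-pod-failure-operator
    spec:
      action: pod-failure
      mode: one
      duration: "60s"
      selector:
        pods:
          ps-operator:
            - percona-server-mysql-operator-7959bb8cdc-t454f
    EOF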
    logger.go:42: 17:45:24 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:45:24 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:45:25 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:45:26 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:45:26 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:45:26 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:45:28 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:45:28 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:45:29 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:45:30 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:45:30 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:45:30 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:45:32 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:45:32 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:45:32 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:45:33 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:45:33 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:45:34 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:45:35 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:45:35 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:45:35 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:45:37 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:45:37 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:45:37 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:45:38 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:45:38 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:45:39 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:45:40 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:45:40 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:45:40 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:45:42 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:45:42 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:45:42 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:45:43 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:45:43 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:45:44 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:45:45 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:45:45 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:45:46 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:45:47 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:45:47 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:45:47 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:45:48 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:45:49 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:45:49 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:45:50 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:45:50 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:45:51 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:45:52 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:45:52 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:45:52 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:45:54 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:45:54 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:45:54 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:45:55 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:45:55 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:45:56 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:45:57 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:45:57 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:45:57 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found.
    logger.go:42: 17:45:59 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
    logger.go:42: 17:45:59 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
    logger.go:42: 17:45:59 | operator-self-healing/9-pod-failure | INFO   Found 1 resource(s).
    logger.go:42: 17:45:59 | operator-self-healing/9-pod-failure | NAME                            NAMESPACE     COL0
    logger.go:42: 17:45:59 | operator-self-healing/9-pod-failure | percona-server-mysql-operator   ps-operator   1
    logger.go:42: 17:45:59 | operator-self-healing/9-pod-failure | ASSERT PASS
    logger.go:42: 17:45:59 | operator-self-healing/9-pod-failure | test step completed 9-pod-failure
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | starting test step 10-scale-up
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | running command: [sh -c set -o errexit
        set -o xtrace
        
        source ../../functions
        
        get_cr \
            | yq eval '.spec.mysql.clusterType="async"' - \
            | yq eval '.spec.mysql.size=3' - \
            | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \
            | yq eval '.spec.proxy.haproxy.enabled=true' - \
            | yq eval '.spec.proxy.haproxy.size=5' - \
            | yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - \
            | yq eval '.spec.orchestrator.enabled=true' - \
            | yq eval '.spec.orchestrator.size=3' - \
            | yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \
            | kubectl -n "${NAMESPACE}" apply -f -]
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | + source ../../functions
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ realpath ../../..
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | ++++ pwd
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/tests/operator-self-healing
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | ++ test_name=operator-self-healing
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/vars.sh
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ export GIT_BRANCH=PR-825
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ GIT_BRANCH=PR-825
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ export VERSION=PR-825-808887c6
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ VERSION=PR-825-808887c6
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ export PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ export CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | ++++ which gdate
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-825/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | ++++ which date
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ date=/usr/bin/date
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ oc get projects
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ :
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ kubectl get nodes
    logger.go:42: 17:45:59 | operator-self-healing/10-scale-up | +++ grep '^minikube'
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + get_cr
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval '.spec.mysql.clusterType="async"' -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval .spec.mysql.size=3 -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + local name_suffix=
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval '.spec.mysql.clusterType="async"' -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval .spec.proxy.haproxy.enabled=true -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval .spec.proxy.haproxy.size=5 -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval .spec.orchestrator.enabled=true -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval '.spec.sslSecretName="test-ssl"' -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + '[' -n '' ']'
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval '.spec.secretsName="test-secrets"' -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval .spec.orchestrator.size=3 -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval '.spec.upgradeOptions.apply="disabled"' -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-825-808887c6"' -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + kubectl -n kuttl-test-loving-spider apply -f -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval '.spec.pmm.image="perconalab/pmm-client:3-dev-latest"' -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | ++ printf '.metadata.name="%s"' operator-self-healing
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval '.metadata.name="operator-self-healing"' /mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy/cr.yaml
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' -
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:46:00 | operator-self-healing/10-scale-up | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' -
    logger.go:42: 17:46:01 | operator-self-healing/10-scale-up | perconaservermysql.ps.percona.com/operator-self-healing configured
    logger.go:42: 17:46:15 | operator-self-healing/10-scale-up | test step completed 10-scale-up
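Step 10 uses the same render-and-apply pipeline as step 8; the only functional change is spec.proxy.haproxy.size moving from 3 to 5, and the step completes once the cluster reports the new replicas (presumably via the step's kuttl assert file, which is not echoed in the trace). A minimal sketch of the same change as a targeted merge patch instead of re-applying the whole rendered CR; the resource kind comes from the apply output above:

    # Equivalent targeted scale-up of the HAProxy component.
    kubectl -n "$NAMESPACE" patch perconaservermysql.ps.percona.com operator-self-healing \
        --type=merge -p '{"spec":{"proxy":{"haproxy":{"size":5}}}}'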
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | starting test step 11-destroy-chaos-mesh
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | running command: [sh -c set -o errexit
        set -o xtrace
        
        source ../../functions
        
        destroy_chaos_mesh]
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | + source ../../functions
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ realpath ../../..
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | ++++ pwd
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/tests/operator-self-healing
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | ++ test_name=operator-self-healing
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/vars.sh
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ export GIT_BRANCH=PR-825
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ GIT_BRANCH=PR-825
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ export VERSION=PR-825-808887c6
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ VERSION=PR-825-808887c6
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ export PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ export CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | ++++ which gdate
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-825/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | ++++ which date
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ date=/usr/bin/date
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ oc get projects
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ :
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ kubectl get nodes
    logger.go:42: 17:46:15 | operator-self-healing/11-destroy-chaos-mesh | +++ grep '^minikube'
    logger.go:42: 17:46:16 | operator-self-healing/11-destroy-chaos-mesh | + destroy_chaos_mesh
    logger.go:42: 17:46:16 | operator-self-healing/11-destroy-chaos-mesh | ++ helm list --all-namespaces --filter chaos-mesh
    logger.go:42: 17:46:16 | operator-self-healing/11-destroy-chaos-mesh | ++ tail -n1
    logger.go:42: 17:46:16 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '-F ' '{print $2}'
    logger.go:42: 17:46:16 | operator-self-healing/11-destroy-chaos-mesh | ++ sed s/NAMESPACE//
    logger.go:42: 17:46:16 | operator-self-healing/11-destroy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-825/kubeconfig
    logger.go:42: 17:46:16 | operator-self-healing/11-destroy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-825/kubeconfig
    logger.go:42: 17:46:17 | operator-self-healing/11-destroy-chaos-mesh | + local chaos_mesh_ns=kuttl-test-loving-spider
    logger.go:42: 17:46:17 | operator-self-healing/11-destroy-chaos-mesh | + '[' -n kuttl-test-loving-spider ']'
    logger.go:42: 17:46:17 | operator-self-healing/11-destroy-chaos-mesh | + helm uninstall --wait --timeout 60s chaos-mesh --namespace kuttl-test-loving-spider
    logger.go:42: 17:46:17 | operator-self-healing/11-destroy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-825/kubeconfig
    logger.go:42: 17:46:17 | operator-self-healing/11-destroy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-825/kubeconfig
    logger.go:42: 17:46:26 | operator-self-healing/11-destroy-chaos-mesh | release "chaos-mesh" uninstalled
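The xtrace above shows how the step discovers which namespace the chaos-mesh release lives in before uninstalling it: `helm list --all-namespaces --filter chaos-mesh` prints NAME NAMESPACE REVISION ...; the last line's second column is the namespace, and the `sed s/NAMESPACE//` handles the no-release case, where only the header survives `tail -n1` and would otherwise yield the literal word NAMESPACE. A minimal sketch of that discovery, reconstructed from the trace (the helper name detect_chaos_mesh_ns is hypothetical):

    # sketch reconstructed from the xtrace; not the verbatim e2e-tests/functions source
    detect_chaos_mesh_ns() {
        # with no release installed, only the header line reaches tail -n1,
        # and sed erases the literal NAMESPACE so the result is empty
        helm list --all-namespaces --filter chaos-mesh \
            | tail -n1 \
            | awk '{print $2}' \
            | sed s/NAMESPACE//
    }

    chaos_mesh_ns=$(detect_chaos_mesh_ns)
    if [ -n "$chaos_mesh_ns" ]; then
        helm uninstall --wait --timeout 60s chaos-mesh --namespace "$chaos_mesh_ns"
    fi

In this run the release was found in kuttl-test-loving-spider, so the `[ -n ... ]` gate passed and the uninstall ran.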
    logger.go:42: 17:46:26 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get MutatingWebhookConfiguration
    logger.go:42: 17:46:26 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh
    logger.go:42: 17:46:26 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}'
    logger.go:42: 17:46:27 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete MutatingWebhookConfiguration
    logger.go:42: 17:46:27 | operator-self-healing/11-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified
    logger.go:42: 17:46:27 | operator-self-healing/11-destroy-chaos-mesh | + :
    logger.go:42: 17:46:27 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration
    logger.go:42: 17:46:27 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh
    logger.go:42: 17:46:27 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}'
    logger.go:42: 17:46:27 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration
    logger.go:42: 17:46:27 | operator-self-healing/11-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified
    logger.go:42: 17:46:27 | operator-self-healing/11-destroy-chaos-mesh | + :
    logger.go:42: 17:46:27 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration
    logger.go:42: 17:46:27 | operator-self-healing/11-destroy-chaos-mesh | ++ grep validate-auth
    logger.go:42: 17:46:27 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}'
    logger.go:42: 17:46:28 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration
    logger.go:42: 17:46:28 | operator-self-healing/11-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified
    logger.go:42: 17:46:28 | operator-self-healing/11-destroy-chaos-mesh | + :
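Each webhook sweep above pipes `kubectl get ... | grep chaos-mesh | awk '{print $1}'` into `kubectl delete`; when grep matches nothing (the configurations were already removed with the helm release), delete receives a resource type but no names and exits with "error: resource(s) were provided, but no name was specified". The script tolerates this with a trailing `|| :`, which is what the lone `+ :` lines record. A guarded variant that skips the delete on an empty list, as a sketch rather than the script's actual code:

    # hedged sketch: equivalent effect to the tolerated-error pattern in the log
    names=$(kubectl get MutatingWebhookConfiguration | grep chaos-mesh | awk '{print $1}') || true
    if [ -n "$names" ]; then
        # $names is intentionally unquoted so multiple names split into arguments
        timeout 30 kubectl delete MutatingWebhookConfiguration $names
    fi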
    logger.go:42: 17:46:28 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl api-resources
    logger.go:42: 17:46:28 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh
    logger.go:42: 17:46:28 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}'
    logger.go:42: 17:46:28 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:28 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get awschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:28 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:29 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete awschaos --all --all-namespaces
    logger.go:42: 17:46:29 | operator-self-healing/11-destroy-chaos-mesh | No resources found
    logger.go:42: 17:46:29 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:29 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get azurechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:29 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:30 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete azurechaos --all --all-namespaces
    logger.go:42: 17:46:30 | operator-self-healing/11-destroy-chaos-mesh | No resources found
    logger.go:42: 17:46:30 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:30 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get blockchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:30 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:30 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete blockchaos --all --all-namespaces
    logger.go:42: 17:46:31 | operator-self-healing/11-destroy-chaos-mesh | No resources found
    logger.go:42: 17:46:31 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:31 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:31 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get dnschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:31 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete dnschaos --all --all-namespaces
    logger.go:42: 17:46:31 | operator-self-healing/11-destroy-chaos-mesh | No resources found
    logger.go:42: 17:46:31 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:31 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get gcpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:31 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:32 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete gcpchaos --all --all-namespaces
    logger.go:42: 17:46:32 | operator-self-healing/11-destroy-chaos-mesh | No resources found
    logger.go:42: 17:46:32 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:32 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get httpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:32 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:33 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete httpchaos --all --all-namespaces
    logger.go:42: 17:46:33 | operator-self-healing/11-destroy-chaos-mesh | No resources found
    logger.go:42: 17:46:33 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:33 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get iochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:33 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:33 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete iochaos --all --all-namespaces
    logger.go:42: 17:46:34 | operator-self-healing/11-destroy-chaos-mesh | No resources found
    logger.go:42: 17:46:34 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:34 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get jvmchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:34 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:34 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete jvmchaos --all --all-namespaces
    logger.go:42: 17:46:34 | operator-self-healing/11-destroy-chaos-mesh | No resources found
    logger.go:42: 17:46:34 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:34 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:34 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get kernelchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:35 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete kernelchaos --all --all-namespaces
    logger.go:42: 17:46:35 | operator-self-healing/11-destroy-chaos-mesh | No resources found
    logger.go:42: 17:46:35 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:35 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get networkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:35 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:36 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'NetworkChaos   chaos-pod-network-loss-operator   ps-operator'
    logger.go:42: 17:46:36 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}'
    logger.go:42: 17:46:36 | operator-self-healing/11-destroy-chaos-mesh | + local kind=NetworkChaos
    logger.go:42: 17:46:36 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'NetworkChaos   chaos-pod-network-loss-operator   ps-operator'
    logger.go:42: 17:46:36 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $2}'
    logger.go:42: 17:46:36 | operator-self-healing/11-destroy-chaos-mesh | + local name=chaos-pod-network-loss-operator
    logger.go:42: 17:46:36 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'NetworkChaos   chaos-pod-network-loss-operator   ps-operator'
    logger.go:42: 17:46:36 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $3}'
    logger.go:42: 17:46:36 | operator-self-healing/11-destroy-chaos-mesh | + local namespace=ps-operator
    logger.go:42: 17:46:36 | operator-self-healing/11-destroy-chaos-mesh | + kubectl patch NetworkChaos chaos-pod-network-loss-operator -n ps-operator --type=merge -p '{"metadata":{"finalizers":[]}}'
    logger.go:42: 17:46:36 | operator-self-healing/11-destroy-chaos-mesh | networkchaos.chaos-mesh.org/chaos-pod-network-loss-operator patched
    logger.go:42: 17:46:36 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:36 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete networkchaos --all --all-namespaces
    logger.go:42: 17:46:37 | operator-self-healing/11-destroy-chaos-mesh | networkchaos.chaos-mesh.org "chaos-pod-network-loss-operator" deleted
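Note the order here: before the bulk delete, the loop strips finalizers from the live NetworkChaos object. Chaos-mesh sets a finalizer so its controller can recover injected faults before the object disappears; with the release already uninstalled there is no controller left to do that, so a plain delete would hang. The patch at 17:46:36, shown standalone (kind, name, and namespace come from the parsed "Kind Name NAMESPACE" line above it):

    # the exact merge patch from the xtrace: empty the finalizer list so
    # deletion cannot block on a controller that no longer exists
    kubectl patch NetworkChaos chaos-pod-network-loss-operator \
        -n ps-operator \
        --type=merge \
        -p '{"metadata":{"finalizers":[]}}'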
    logger.go:42: 17:46:37 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:37 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get physicalmachinechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:37 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:37 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete physicalmachinechaos --all --all-namespaces
    logger.go:42: 17:46:37 | operator-self-healing/11-destroy-chaos-mesh | No resources found
    logger.go:42: 17:46:37 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:37 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get physicalmachines --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:37 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:38 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete physicalmachines --all --all-namespaces
    logger.go:42: 17:46:38 | operator-self-healing/11-destroy-chaos-mesh | No resources found
    logger.go:42: 17:46:38 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:38 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get podchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:38 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'PodChaos   chaos-pod-failure-operator   ps-operator'
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}'
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | + local kind=PodChaos
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'PodChaos   chaos-pod-failure-operator   ps-operator'
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $2}'
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | + local name=chaos-pod-failure-operator
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'PodChaos   chaos-pod-failure-operator   ps-operator'
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $3}'
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | + local namespace=ps-operator
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | + kubectl patch PodChaos chaos-pod-failure-operator -n ps-operator --type=merge -p '{"metadata":{"finalizers":[]}}'
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | podchaos.chaos-mesh.org/chaos-pod-failure-operator patched
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'PodChaos   chaos-pod-kill-operator      ps-operator'
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}'
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | + local kind=PodChaos
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'PodChaos   chaos-pod-kill-operator      ps-operator'
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $2}'
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | + local name=chaos-pod-kill-operator
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'PodChaos   chaos-pod-kill-operator      ps-operator'
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $3}'
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | + local namespace=ps-operator
    logger.go:42: 17:46:39 | operator-self-healing/11-destroy-chaos-mesh | + kubectl patch PodChaos chaos-pod-kill-operator -n ps-operator --type=merge -p '{"metadata":{"finalizers":[]}}'
    logger.go:42: 17:46:40 | operator-self-healing/11-destroy-chaos-mesh | podchaos.chaos-mesh.org/chaos-pod-kill-operator patched
    logger.go:42: 17:46:40 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:40 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete podchaos --all --all-namespaces
    logger.go:42: 17:46:40 | operator-self-healing/11-destroy-chaos-mesh | podchaos.chaos-mesh.org "chaos-pod-failure-operator" deleted
    logger.go:42: 17:46:40 | operator-self-healing/11-destroy-chaos-mesh | podchaos.chaos-mesh.org "chaos-pod-kill-operator" deleted
    logger.go:42: 17:46:40 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:40 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get podhttpchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:40 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:41 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete podhttpchaos --all --all-namespaces
    logger.go:42: 17:46:41 | operator-self-healing/11-destroy-chaos-mesh | No resources found
    logger.go:42: 17:46:41 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:41 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get podiochaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:41 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:42 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete podiochaos --all --all-namespaces
    logger.go:42: 17:46:42 | operator-self-healing/11-destroy-chaos-mesh | No resources found
    logger.go:42: 17:46:42 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:42 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get podnetworkchaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:42 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:42 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'PodNetworkChaos   percona-server-mysql-operator-7959bb8cdc-t454f   ps-operator'
    logger.go:42: 17:46:42 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}'
    logger.go:42: 17:46:42 | operator-self-healing/11-destroy-chaos-mesh | + local kind=PodNetworkChaos
    logger.go:42: 17:46:42 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'PodNetworkChaos   percona-server-mysql-operator-7959bb8cdc-t454f   ps-operator'
    logger.go:42: 17:46:42 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $2}'
    logger.go:42: 17:46:42 | operator-self-healing/11-destroy-chaos-mesh | + local name=percona-server-mysql-operator-7959bb8cdc-t454f
    logger.go:42: 17:46:42 | operator-self-healing/11-destroy-chaos-mesh | ++ echo 'PodNetworkChaos   percona-server-mysql-operator-7959bb8cdc-t454f   ps-operator'
    logger.go:42: 17:46:42 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $3}'
    logger.go:42: 17:46:42 | operator-self-healing/11-destroy-chaos-mesh | + local namespace=ps-operator
    logger.go:42: 17:46:42 | operator-self-healing/11-destroy-chaos-mesh | + kubectl patch PodNetworkChaos percona-server-mysql-operator-7959bb8cdc-t454f -n ps-operator --type=merge -p '{"metadata":{"finalizers":[]}}'
    logger.go:42: 17:46:43 | operator-self-healing/11-destroy-chaos-mesh | podnetworkchaos.chaos-mesh.org/percona-server-mysql-operator-7959bb8cdc-t454f patched (no change)
    logger.go:42: 17:46:43 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:43 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete podnetworkchaos --all --all-namespaces
    logger.go:42: 17:46:43 | operator-self-healing/11-destroy-chaos-mesh | podnetworkchaos.chaos-mesh.org "percona-server-mysql-operator-7959bb8cdc-t454f" deleted
    logger.go:42: 17:46:44 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:44 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get remoteclusters --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:44 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:44 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete remoteclusters --all --all-namespaces
    logger.go:42: 17:46:44 | operator-self-healing/11-destroy-chaos-mesh | No resources found
    logger.go:42: 17:46:44 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:44 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:44 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get schedules --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:45 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete schedules --all --all-namespaces
    logger.go:42: 17:46:45 | operator-self-healing/11-destroy-chaos-mesh | No resources found
    logger.go:42: 17:46:45 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:45 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get statuschecks --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:45 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:45 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete statuschecks --all --all-namespaces
    logger.go:42: 17:46:46 | operator-self-healing/11-destroy-chaos-mesh | No resources found
    logger.go:42: 17:46:46 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:46 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get stresschaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:46 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:46 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete stresschaos --all --all-namespaces
    logger.go:42: 17:46:47 | operator-self-healing/11-destroy-chaos-mesh | No resources found
    logger.go:42: 17:46:47 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:47 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get timechaos --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:47 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:47 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete timechaos --all --all-namespaces
    logger.go:42: 17:46:47 | operator-self-healing/11-destroy-chaos-mesh | No resources found
    logger.go:42: 17:46:47 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:47 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get workflownodes --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:47 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:48 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete workflownodes --all --all-namespaces
    logger.go:42: 17:46:48 | operator-self-healing/11-destroy-chaos-mesh | No resources found
    logger.go:42: 17:46:48 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
    logger.go:42: 17:46:48 | operator-self-healing/11-destroy-chaos-mesh | + kubectl get workflows --all-namespaces --no-headers -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace
    logger.go:42: 17:46:48 | operator-self-healing/11-destroy-chaos-mesh | + read -r line
    logger.go:42: 17:46:48 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete workflows --all --all-namespaces
    logger.go:42: 17:46:49 | operator-self-healing/11-destroy-chaos-mesh | No resources found
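The whole per-kind sweep from awschaos through workflows can be read back out of the xtrace as one loop: enumerate every chaos-mesh API kind, strip finalizers from each live object of that kind, then bulk-delete the kind. A reconstruction as a sketch (the real function lives in e2e-tests/functions and may differ in detail):

    # sketch of the cleanup loop reconstructed from the xtrace above
    for i in $(kubectl api-resources | grep chaos-mesh | awk '{print $1}'); do
        kubectl get "$i" --all-namespaces --no-headers \
            -o custom-columns=Kind:.kind,Name:.metadata.name,NAMESPACE:.metadata.namespace \
            | while read -r line; do
                kind=$(echo "$line" | awk '{print $1}')
                name=$(echo "$line" | awk '{print $2}')
                namespace=$(echo "$line" | awk '{print $3}')
                # clear finalizers so deletion cannot hang once the controller is gone
                kubectl patch "$kind" "$name" -n "$namespace" \
                    --type=merge -p '{"metadata":{"finalizers":[]}}' || :
            done
        timeout 30 kubectl delete "$i" --all --all-namespaces || :
    done

In this run only networkchaos, podchaos, and podnetworkchaos had live objects, all in ps-operator; every other kind reported "No resources found" and fell straight through to the (empty) bulk delete.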
    logger.go:42: 17:46:49 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get crd
    logger.go:42: 17:46:49 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh.org
    logger.go:42: 17:46:49 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}'
    logger.go:42: 17:46:50 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete crd awschaos.chaos-mesh.org azurechaos.chaos-mesh.org blockchaos.chaos-mesh.org dnschaos.chaos-mesh.org gcpchaos.chaos-mesh.org httpchaos.chaos-mesh.org iochaos.chaos-mesh.org jvmchaos.chaos-mesh.org kernelchaos.chaos-mesh.org networkchaos.chaos-mesh.org physicalmachinechaos.chaos-mesh.org physicalmachines.chaos-mesh.org podchaos.chaos-mesh.org podhttpchaos.chaos-mesh.org podiochaos.chaos-mesh.org podnetworkchaos.chaos-mesh.org remoteclusters.chaos-mesh.org schedules.chaos-mesh.org statuschecks.chaos-mesh.org stresschaos.chaos-mesh.org timechaos.chaos-mesh.org workflownodes.chaos-mesh.org workflows.chaos-mesh.org
    logger.go:42: 17:46:50 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "awschaos.chaos-mesh.org" deleted
    logger.go:42: 17:46:50 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "azurechaos.chaos-mesh.org" deleted
    logger.go:42: 17:46:50 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "blockchaos.chaos-mesh.org" deleted
    logger.go:42: 17:46:50 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "dnschaos.chaos-mesh.org" deleted
    logger.go:42: 17:46:50 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "gcpchaos.chaos-mesh.org" deleted
    logger.go:42: 17:46:51 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "httpchaos.chaos-mesh.org" deleted
    logger.go:42: 17:46:51 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "iochaos.chaos-mesh.org" deleted
    logger.go:42: 17:46:51 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "jvmchaos.chaos-mesh.org" deleted
    logger.go:42: 17:46:51 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "kernelchaos.chaos-mesh.org" deleted
    logger.go:42: 17:46:51 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "networkchaos.chaos-mesh.org" deleted
    logger.go:42: 17:46:51 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "physicalmachinechaos.chaos-mesh.org" deleted
    logger.go:42: 17:46:52 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "physicalmachines.chaos-mesh.org" deleted
    logger.go:42: 17:46:52 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podchaos.chaos-mesh.org" deleted
    logger.go:42: 17:46:53 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podhttpchaos.chaos-mesh.org" deleted
    logger.go:42: 17:46:53 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podiochaos.chaos-mesh.org" deleted
    logger.go:42: 17:46:53 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podnetworkchaos.chaos-mesh.org" deleted
    logger.go:42: 17:46:53 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "remoteclusters.chaos-mesh.org" deleted
    logger.go:42: 17:46:54 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "schedules.chaos-mesh.org" deleted
    logger.go:42: 17:46:54 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "statuschecks.chaos-mesh.org" deleted
    logger.go:42: 17:46:54 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "stresschaos.chaos-mesh.org" deleted
    logger.go:42: 17:46:54 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "timechaos.chaos-mesh.org" deleted
    logger.go:42: 17:46:56 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "workflownodes.chaos-mesh.org" deleted
    logger.go:42: 17:46:57 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "workflows.chaos-mesh.org" deleted
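Only after all objects are gone are the CRDs themselves removed; doing it in this order avoids cascade-deleting objects whose finalizers could stall the CRD removal. The argument list for the single delete is built the same way as the earlier sweeps, roughly:

    # sketch: collect every chaos-mesh CRD name and delete them in one call
    crds=$(kubectl get crd | grep chaos-mesh.org | awk '{print $1}') || true
    if [ -n "$crds" ]; then
        timeout 30 kubectl delete crd $crds
    fi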
    logger.go:42: 17:47:02 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get clusterrolebinding
    logger.go:42: 17:47:02 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh
    logger.go:42: 17:47:02 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}'
    logger.go:42: 17:47:03 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete clusterrolebinding
    logger.go:42: 17:47:03 | operator-self-healing/11-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified
    logger.go:42: 17:47:03 | operator-self-healing/11-destroy-chaos-mesh | + :
    logger.go:42: 17:47:03 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get clusterrole
    logger.go:42: 17:47:03 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh
    logger.go:42: 17:47:03 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}'
    logger.go:42: 17:47:04 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete clusterrole
    logger.go:42: 17:47:04 | operator-self-healing/11-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified
    logger.go:42: 17:47:04 | operator-self-healing/11-destroy-chaos-mesh | + :
    logger.go:42: 17:47:04 | operator-self-healing/11-destroy-chaos-mesh | test step completed 11-destroy-chaos-mesh
    logger.go:42: 17:47:04 | operator-self-healing/98-drop-finalizer | starting test step 98-drop-finalizer
    logger.go:42: 17:47:04 | operator-self-healing/98-drop-finalizer | PerconaServerMySQL:kuttl-test-loving-spider/operator-self-healing updated
    logger.go:42: 17:47:04 | operator-self-healing/98-drop-finalizer | test step completed 98-drop-finalizer
    logger.go:42: 17:47:04 | operator-self-healing/99-remove-cluster-gracefully | starting test step 99-remove-cluster-gracefully
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | running command: [sh -c set -o errexit
        set -o xtrace
        
        source ../../functions
        
        destroy_operator]
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | + source ../../functions
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ realpath ../../..
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | ++++ pwd
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/tests/operator-self-healing
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | ++ test_name=operator-self-healing
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/vars.sh
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-825
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/deploy
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-825/e2e-tests/conf
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | ++++ git rev-parse --abbrev-ref HEAD
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ export GIT_BRANCH=PR-825
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ GIT_BRANCH=PR-825
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ export VERSION=PR-825-808887c6
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ VERSION=PR-825-808887c6
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-825-808887c6
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ export PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ PMM_SERVER_VERSION=1.4.0
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:3-dev-latest
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:3-dev-latest
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ export CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ CERT_MANAGER_VER=1.16.3
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | ++++ which gdate
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-825/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | ++++ which date
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ date=/usr/bin/date
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ oc get projects
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ :
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ kubectl get nodes
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | +++ grep '^minikube'
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | + destroy_operator
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | + kubectl -n ps-operator delete deployment percona-server-mysql-operator --force --grace-period=0
    logger.go:42: 17:47:05 | operator-self-healing/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
    logger.go:42: 17:47:06 | operator-self-healing/99-remove-cluster-gracefully | deployment.apps "percona-server-mysql-operator" force deleted
    logger.go:42: 17:47:06 | operator-self-healing/99-remove-cluster-gracefully | + [[ -n ps-operator ]]
    logger.go:42: 17:47:06 | operator-self-healing/99-remove-cluster-gracefully | + kubectl delete namespace ps-operator --force --grace-period=0
    logger.go:42: 17:47:06 | operator-self-healing/99-remove-cluster-gracefully | Warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
    logger.go:42: 17:47:06 | operator-self-healing/99-remove-cluster-gracefully | namespace "ps-operator" force deleted
    logger.go:42: 17:47:12 | operator-self-healing/99-remove-cluster-gracefully | test step completed 99-remove-cluster-gracefully
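destroy_operator force-deletes the operator Deployment and then its namespace with `--force --grace-period=0`, which is why kubectl prints the "Immediate deletion does not wait for confirmation..." warning twice; the step only completes once the namespace is actually gone, about six seconds later. A minimal sketch of the teardown, assuming an OPERATOR_NS variable set to ps-operator as in this run (the real function may name things differently):

    # hedged sketch of the teardown seen in the xtrace
    destroy_operator() {
        kubectl -n "$OPERATOR_NS" delete deployment percona-server-mysql-operator \
            --force --grace-period=0
        # only attempt namespace removal when a dedicated operator namespace is in use
        if [[ -n $OPERATOR_NS ]]; then
            kubectl delete namespace "$OPERATOR_NS" --force --grace-period=0
        fi
    }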
    logger.go:42: 17:47:13 | operator-self-healing | operator-self-healing events from ns kuttl-test-loving-spider:
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:37:43 +0000 UTC	Normal	Pod mysql-client	Binding	Scheduled	Successfully assigned kuttl-test-loving-spider/mysql-client to gke-jen-ps-825-808887c6--default-pool-1749e6e7-lq6v	default-scheduler	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:37:44 +0000 UTC	Normal	Pod mysql-client.spec.containers{mysql-client}		Pulled	Container image "percona/percona-server:8.0.33" already present on machine	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:37:44 +0000 UTC	Normal	Pod mysql-client.spec.containers{mysql-client}		Created	Created container: mysql-client	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:37:44 +0000 UTC	Normal	Pod mysql-client.spec.containers{mysql-client}		Started	Started container mysql-client	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:22 +0000 UTC	Normal	ReplicaSet.apps chaos-controller-manager-657db9f6b4		SuccessfulCreate	Created pod: chaos-controller-manager-657db9f6b4-shk6p	replicaset-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:22 +0000 UTC	Normal	ReplicaSet.apps chaos-controller-manager-657db9f6b4		SuccessfulCreate	Created pod: chaos-controller-manager-657db9f6b4-jvlp7	replicaset-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:22 +0000 UTC	Normal	Deployment.apps chaos-controller-manager		ScalingReplicaSet	Scaled up replica set chaos-controller-manager-657db9f6b4 to 3	deployment-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:22 +0000 UTC	Normal	Pod chaos-daemon-7d7f8	Binding	Scheduled	Successfully assigned kuttl-test-loving-spider/chaos-daemon-7d7f8 to gke-jen-ps-825-808887c6--default-pool-1749e6e7-z39c	default-scheduler	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:22 +0000 UTC	Normal	Pod chaos-daemon-qszd6	Binding	Scheduled	Successfully assigned kuttl-test-loving-spider/chaos-daemon-qszd6 to gke-jen-ps-825-808887c6--default-pool-1749e6e7-lq6v	default-scheduler	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:22 +0000 UTC	Normal	Pod chaos-daemon-zmjrt	Binding	Scheduled	Successfully assigned kuttl-test-loving-spider/chaos-daemon-zmjrt to gke-jen-ps-825-808887c6--default-pool-1749e6e7-h9s3	default-scheduler	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:22 +0000 UTC	Normal	Pod chaos-daemon-zmjrt.spec.containers{chaos-daemon}		Pulling	Pulling image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:22 +0000 UTC	Normal	DaemonSet.apps chaos-daemon		SuccessfulCreate	Created pod: chaos-daemon-7d7f8	daemonset-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:22 +0000 UTC	Normal	DaemonSet.apps chaos-daemon		SuccessfulCreate	Created pod: chaos-daemon-zmjrt	daemonset-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:22 +0000 UTC	Normal	DaemonSet.apps chaos-daemon		SuccessfulCreate	Created pod: chaos-daemon-qszd6	daemonset-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:23 +0000 UTC	Normal	Pod chaos-controller-manager-657db9f6b4-jvlp7	Binding	Scheduled	Successfully assigned kuttl-test-loving-spider/chaos-controller-manager-657db9f6b4-jvlp7 to gke-jen-ps-825-808887c6--default-pool-1749e6e7-h9s3	default-scheduler	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:23 +0000 UTC	Normal	Pod chaos-controller-manager-657db9f6b4-jvlp7.spec.containers{chaos-mesh}		Pulling	Pulling image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:23 +0000 UTC	Normal	Pod chaos-controller-manager-657db9f6b4-pcmwj	Binding	Scheduled	Successfully assigned kuttl-test-loving-spider/chaos-controller-manager-657db9f6b4-pcmwj to gke-jen-ps-825-808887c6--default-pool-1749e6e7-z39c	default-scheduler	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:23 +0000 UTC	Normal	Pod chaos-controller-manager-657db9f6b4-pcmwj.spec.containers{chaos-mesh}		Pulling	Pulling image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:23 +0000 UTC	Normal	Pod chaos-controller-manager-657db9f6b4-shk6p	Binding	Scheduled	Successfully assigned kuttl-test-loving-spider/chaos-controller-manager-657db9f6b4-shk6p to gke-jen-ps-825-808887c6--default-pool-1749e6e7-lq6v	default-scheduler	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:23 +0000 UTC	Normal	Pod chaos-controller-manager-657db9f6b4-shk6p.spec.containers{chaos-mesh}		Pulling	Pulling image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:23 +0000 UTC	Normal	ReplicaSet.apps chaos-controller-manager-657db9f6b4		SuccessfulCreate	Created pod: chaos-controller-manager-657db9f6b4-pcmwj	replicaset-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:23 +0000 UTC	Normal	Pod chaos-daemon-7d7f8.spec.containers{chaos-daemon}		Pulling	Pulling image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:23 +0000 UTC	Normal	Pod chaos-daemon-qszd6.spec.containers{chaos-daemon}		Pulling	Pulling image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:27 +0000 UTC	Normal	Pod chaos-controller-manager-657db9f6b4-jvlp7.spec.containers{chaos-mesh}		Pulled	Successfully pulled image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" in 3.381s (3.381s including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:27 +0000 UTC	Normal	Pod chaos-controller-manager-657db9f6b4-jvlp7.spec.containers{chaos-mesh}		Created	Created container: chaos-mesh	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:27 +0000 UTC	Normal	Pod chaos-controller-manager-657db9f6b4-jvlp7.spec.containers{chaos-mesh}		Started	Started container chaos-mesh	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:27 +0000 UTC	Normal	Pod chaos-controller-manager-657db9f6b4-pcmwj.spec.containers{chaos-mesh}		Pulled	Successfully pulled image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" in 3.543s (3.543s including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:27 +0000 UTC	Normal	Pod chaos-controller-manager-657db9f6b4-pcmwj.spec.containers{chaos-mesh}		Created	Created container: chaos-mesh	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:27 +0000 UTC	Normal	Pod chaos-controller-manager-657db9f6b4-pcmwj.spec.containers{chaos-mesh}		Started	Started container chaos-mesh	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:27 +0000 UTC	Normal	Pod chaos-controller-manager-657db9f6b4-shk6p.spec.containers{chaos-mesh}		Pulled	Successfully pulled image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" in 3.378s (3.378s including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:27 +0000 UTC	Normal	Pod chaos-controller-manager-657db9f6b4-shk6p.spec.containers{chaos-mesh}		Created	Created container: chaos-mesh	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:27 +0000 UTC	Normal	Pod chaos-controller-manager-657db9f6b4-shk6p.spec.containers{chaos-mesh}		Started	Started container chaos-mesh	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:27 +0000 UTC	Normal	ConfigMap chaos-mesh		LeaderElection	chaos-controller-manager-657db9f6b4-shk6p_21fa2080-3e20-404a-835a-767fd1d6ac9d became leader		
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:27 +0000 UTC	Normal	Lease.coordination.k8s.io chaos-mesh		LeaderElection	chaos-controller-manager-657db9f6b4-shk6p_21fa2080-3e20-404a-835a-767fd1d6ac9d became leader		
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:33 +0000 UTC	Normal	Pod chaos-daemon-qszd6.spec.containers{chaos-daemon}		Pulled	Successfully pulled image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" in 10.109s (10.109s including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:33 +0000 UTC	Normal	Pod chaos-daemon-qszd6.spec.containers{chaos-daemon}		Created	Created container: chaos-daemon	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:33 +0000 UTC	Normal	Pod chaos-daemon-qszd6.spec.containers{chaos-daemon}		Started	Started container chaos-daemon	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:33 +0000 UTC	Normal	Pod chaos-daemon-zmjrt.spec.containers{chaos-daemon}		Pulled	Successfully pulled image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" in 10.291s (10.291s including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:33 +0000 UTC	Normal	Pod chaos-daemon-zmjrt.spec.containers{chaos-daemon}		Created	Created container: chaos-daemon	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:33 +0000 UTC	Normal	Pod chaos-daemon-zmjrt.spec.containers{chaos-daemon}		Started	Started container chaos-daemon	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:34 +0000 UTC	Normal	Pod chaos-daemon-7d7f8.spec.containers{chaos-daemon}		Pulled	Successfully pulled image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" in 11.169s (11.169s including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:34 +0000 UTC	Normal	Pod chaos-daemon-7d7f8.spec.containers{chaos-daemon}		Created	Created container: chaos-daemon	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:34 +0000 UTC	Normal	Pod chaos-daemon-7d7f8.spec.containers{chaos-daemon}		Started	Started container chaos-daemon	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:37 +0000 UTC	Normal	PersistentVolumeClaim datadir-operator-self-healing-mysql-0		WaitForFirstConsumer	waiting for first consumer to be created before binding	persistentvolume-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:37 +0000 UTC	Normal	PersistentVolumeClaim datadir-operator-self-healing-mysql-0		ExternalProvisioning	Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered.	persistentvolume-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:37 +0000 UTC	Normal	PersistentVolumeClaim datadir-operator-self-healing-mysql-0		Provisioning	External provisioner is provisioning volume for claim "kuttl-test-loving-spider/datadir-operator-self-healing-mysql-0"	pd.csi.storage.gke.io_gke-39094cfe6a07481ba2cc-33d5-4745-vm_4de6ca94-5f3f-4aae-aa3d-3a176cb3a99b	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:37 +0000 UTC	Normal	StatefulSet.apps operator-self-healing-mysql		SuccessfulCreate	create Claim datadir-operator-self-healing-mysql-0 Pod operator-self-healing-mysql-0 in StatefulSet operator-self-healing-mysql success	statefulset-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:37 +0000 UTC	Normal	StatefulSet.apps operator-self-healing-mysql		SuccessfulCreate	create Pod operator-self-healing-mysql-0 in StatefulSet operator-self-healing-mysql successful	statefulset-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:37 +0000 UTC	Normal	Pod operator-self-healing-orc-0	Binding	Scheduled	Successfully assigned kuttl-test-loving-spider/operator-self-healing-orc-0 to gke-jen-ps-825-808887c6--default-pool-1749e6e7-h9s3	default-scheduler	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:37 +0000 UTC	Normal	StatefulSet.apps operator-self-healing-orc		SuccessfulCreate	create Pod operator-self-healing-orc-0 in StatefulSet operator-self-healing-orc successful	statefulset-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:38 +0000 UTC	Normal	Pod operator-self-healing-orc-0.spec.initContainers{orc-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-825-808887c6"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:38 +0000 UTC	Normal	Pod operator-self-healing-orc-0.spec.initContainers{orc-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-825-808887c6" in 257ms (257ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:38 +0000 UTC	Normal	Pod operator-self-healing-orc-0.spec.initContainers{orc-init}		Created	Created container: orc-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:38 +0000 UTC	Normal	Pod operator-self-healing-orc-0.spec.initContainers{orc-init}		Started	Started container orc-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:41 +0000 UTC	Normal	PersistentVolumeClaim datadir-operator-self-healing-mysql-0		ProvisioningSucceeded	Successfully provisioned volume pvc-c423610f-ed68-422d-bd19-f8900b128c0c	pd.csi.storage.gke.io_gke-39094cfe6a07481ba2cc-33d5-4745-vm_4de6ca94-5f3f-4aae-aa3d-3a176cb3a99b	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:41 +0000 UTC	Normal	Pod operator-self-healing-mysql-0	Binding	Scheduled	Successfully assigned kuttl-test-loving-spider/operator-self-healing-mysql-0 to gke-jen-ps-825-808887c6--default-pool-1749e6e7-lq6v	default-scheduler	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:45 +0000 UTC	Normal	Pod operator-self-healing-orc-0.spec.containers{orc}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:45 +0000 UTC	Normal	Pod operator-self-healing-orc-0.spec.containers{orc}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 210ms (210ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:45 +0000 UTC	Normal	Pod operator-self-healing-orc-0.spec.containers{orc}		Created	Created container: orc	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:45 +0000 UTC	Normal	Pod operator-self-healing-orc-0.spec.containers{orc}		Started	Started container orc	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:45 +0000 UTC	Normal	Pod operator-self-healing-orc-0.spec.containers{mysql-monit}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:45 +0000 UTC	Normal	Pod operator-self-healing-orc-0.spec.containers{mysql-monit}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 222ms (222ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:46 +0000 UTC	Normal	Pod operator-self-healing-orc-0.spec.containers{mysql-monit}		Created	Created container: mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:46 +0000 UTC	Normal	Pod operator-self-healing-orc-0.spec.containers{mysql-monit}		Started	Started container mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:49 +0000 UTC	Normal	Pod operator-self-healing-mysql-0		SuccessfulAttachVolume	AttachVolume.Attach succeeded for volume "pvc-c423610f-ed68-422d-bd19-f8900b128c0c" 	attachdetach-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:52 +0000 UTC	Normal	Pod operator-self-healing-mysql-0.spec.initContainers{mysql-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-825-808887c6"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:52 +0000 UTC	Normal	Pod operator-self-healing-mysql-0.spec.initContainers{mysql-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-825-808887c6" in 252ms (252ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:52 +0000 UTC	Normal	Pod operator-self-healing-mysql-0.spec.initContainers{mysql-init}		Created	Created container: mysql-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:52 +0000 UTC	Normal	Pod operator-self-healing-mysql-0.spec.initContainers{mysql-init}		Started	Started container mysql-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:59 +0000 UTC	Normal	Pod operator-self-healing-mysql-0.spec.containers{mysql}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-psmysql"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:59 +0000 UTC	Normal	Pod operator-self-healing-mysql-0.spec.containers{mysql}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 247ms (247ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:59 +0000 UTC	Normal	Pod operator-self-healing-mysql-0.spec.containers{mysql}		Created	Created container: mysql	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:59 +0000 UTC	Normal	Pod operator-self-healing-mysql-0.spec.containers{mysql}		Started	Started container mysql	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:59 +0000 UTC	Normal	Pod operator-self-healing-mysql-0.spec.containers{xtrabackup}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-backup"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:59 +0000 UTC	Normal	Pod operator-self-healing-mysql-0.spec.containers{xtrabackup}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 182ms (182ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:38:59 +0000 UTC	Normal	Pod operator-self-healing-mysql-0.spec.containers{xtrabackup}		Created	Created container: xtrabackup	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:00 +0000 UTC	Normal	Pod operator-self-healing-mysql-0.spec.containers{xtrabackup}		Started	Started container xtrabackup	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:00 +0000 UTC	Normal	Pod operator-self-healing-mysql-0.spec.containers{pt-heartbeat}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-toolkit"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:00 +0000 UTC	Normal	Pod operator-self-healing-mysql-0.spec.containers{pt-heartbeat}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 257ms (257ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:00 +0000 UTC	Normal	Pod operator-self-healing-mysql-0.spec.containers{pt-heartbeat}		Created	Created container: pt-heartbeat	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:00 +0000 UTC	Normal	Pod operator-self-healing-mysql-0.spec.containers{pt-heartbeat}		Started	Started container pt-heartbeat	kubelet	
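The events above complete the bring-up of operator-self-healing-mysql-0: the mysql-init init container runs first, then the mysql server starts alongside the xtrabackup and pt-heartbeat sidecars. A minimal sketch for confirming that container layout, using nothing beyond stock kubectl and names taken from the events:

        # Sketch: list init containers and containers of the first mysql pod.
        kubectl -n kuttl-test-loving-spider get pod operator-self-healing-mysql-0 \
          -o jsonpath='{.spec.initContainers[*].name} {.spec.containers[*].name}'
        # expected, per the events: mysql-init mysql xtrabackup pt-heartbeat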
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:18 +0000 UTC	Normal	Pod operator-self-healing-orc-1	Binding	Scheduled	Successfully assigned kuttl-test-loving-spider/operator-self-healing-orc-1 to gke-jen-ps-825-808887c6--default-pool-1749e6e7-h9s3	default-scheduler	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:18 +0000 UTC	Normal	Pod operator-self-healing-orc-1.spec.initContainers{orc-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-825-808887c6"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:18 +0000 UTC	Normal	StatefulSet.apps operator-self-healing-orc		SuccessfulCreate	create Pod operator-self-healing-orc-1 in StatefulSet operator-self-healing-orc successful	statefulset-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:19 +0000 UTC	Normal	Pod operator-self-healing-orc-1.spec.initContainers{orc-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-825-808887c6" in 239ms (239ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:19 +0000 UTC	Normal	Pod operator-self-healing-orc-1.spec.initContainers{orc-init}		Created	Created container: orc-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:19 +0000 UTC	Normal	Pod operator-self-healing-orc-1.spec.initContainers{orc-init}		Started	Started container orc-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:20 +0000 UTC	Normal	Pod operator-self-healing-orc-1.spec.containers{orc}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:20 +0000 UTC	Normal	Pod operator-self-healing-orc-1.spec.containers{orc}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 244ms (244ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:20 +0000 UTC	Normal	Pod operator-self-healing-orc-1.spec.containers{orc}		Created	Created container: orc	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:20 +0000 UTC	Normal	Pod operator-self-healing-orc-1.spec.containers{orc}		Started	Started container orc	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:20 +0000 UTC	Normal	Pod operator-self-healing-orc-1.spec.containers{mysql-monit}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:21 +0000 UTC	Normal	Pod operator-self-healing-orc-1.spec.containers{mysql-monit}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 233ms (233ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:21 +0000 UTC	Normal	Pod operator-self-healing-orc-1.spec.containers{mysql-monit}		Created	Created container: mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:21 +0000 UTC	Normal	Pod operator-self-healing-orc-1.spec.containers{mysql-monit}		Started	Started container mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:32 +0000 UTC	Normal	PersistentVolumeClaim datadir-operator-self-healing-mysql-1		WaitForFirstConsumer	waiting for first consumer to be created before binding	persistentvolume-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:32 +0000 UTC	Normal	PersistentVolumeClaim datadir-operator-self-healing-mysql-1		ExternalProvisioning	Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered.	persistentvolume-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:32 +0000 UTC	Normal	PersistentVolumeClaim datadir-operator-self-healing-mysql-1		Provisioning	External provisioner is provisioning volume for claim "kuttl-test-loving-spider/datadir-operator-self-healing-mysql-1"	pd.csi.storage.gke.io_gke-39094cfe6a07481ba2cc-33d5-4745-vm_4de6ca94-5f3f-4aae-aa3d-3a176cb3a99b	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:32 +0000 UTC	Normal	StatefulSet.apps operator-self-healing-mysql		SuccessfulCreate	create Claim datadir-operator-self-healing-mysql-1 Pod operator-self-healing-mysql-1 in StatefulSet operator-self-healing-mysql success	statefulset-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:32 +0000 UTC	Normal	StatefulSet.apps operator-self-healing-mysql		SuccessfulCreate	create Pod operator-self-healing-mysql-1 in StatefulSet operator-self-healing-mysql successful	statefulset-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:35 +0000 UTC	Normal	StatefulSet.apps operator-self-healing-haproxy		SuccessfulCreate	create Pod operator-self-healing-haproxy-0 in StatefulSet operator-self-healing-haproxy successful	statefulset-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:36 +0000 UTC	Normal	PersistentVolumeClaim datadir-operator-self-healing-mysql-1		ProvisioningSucceeded	Successfully provisioned volume pvc-e85f9dfb-afe3-45dd-9acb-fd39c026cadf	pd.csi.storage.gke.io_gke-39094cfe6a07481ba2cc-33d5-4745-vm_4de6ca94-5f3f-4aae-aa3d-3a176cb3a99b	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:36 +0000 UTC	Normal	Pod operator-self-healing-haproxy-0	Binding	Scheduled	Successfully assigned kuttl-test-loving-spider/operator-self-healing-haproxy-0 to gke-jen-ps-825-808887c6--default-pool-1749e6e7-lq6v	default-scheduler	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:36 +0000 UTC	Normal	Pod operator-self-healing-haproxy-0.spec.initContainers{haproxy-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-825-808887c6"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:36 +0000 UTC	Normal	Pod operator-self-healing-haproxy-0.spec.initContainers{haproxy-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-825-808887c6" in 280ms (280ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:36 +0000 UTC	Normal	Pod operator-self-healing-haproxy-0.spec.initContainers{haproxy-init}		Created	Created container: haproxy-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:36 +0000 UTC	Normal	Pod operator-self-healing-haproxy-0.spec.initContainers{haproxy-init}		Started	Started container haproxy-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:36 +0000 UTC	Normal	Pod operator-self-healing-mysql-1	Binding	Scheduled	Successfully assigned kuttl-test-loving-spider/operator-self-healing-mysql-1 to gke-jen-ps-825-808887c6--default-pool-1749e6e7-z39c	default-scheduler	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:38 +0000 UTC	Normal	Pod operator-self-healing-haproxy-0.spec.containers{haproxy}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:38 +0000 UTC	Normal	Pod operator-self-healing-haproxy-0.spec.containers{haproxy}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 212ms (212ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:38 +0000 UTC	Normal	Pod operator-self-healing-haproxy-0.spec.containers{haproxy}		Created	Created container: haproxy	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:38 +0000 UTC	Normal	Pod operator-self-healing-haproxy-0.spec.containers{haproxy}		Started	Started container haproxy	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:38 +0000 UTC	Normal	Pod operator-self-healing-haproxy-0.spec.containers{mysql-monit}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:39 +0000 UTC	Normal	Pod operator-self-healing-haproxy-0.spec.containers{mysql-monit}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 235ms (235ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:39 +0000 UTC	Normal	Pod operator-self-healing-haproxy-0.spec.containers{mysql-monit}		Created	Created container: mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:39 +0000 UTC	Normal	Pod operator-self-healing-haproxy-0.spec.containers{mysql-monit}		Started	Started container mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:39 +0000 UTC	Normal	Pod operator-self-healing-haproxy-1	Binding	Scheduled	Successfully assigned kuttl-test-loving-spider/operator-self-healing-haproxy-1 to gke-jen-ps-825-808887c6--default-pool-1749e6e7-h9s3	default-scheduler	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:39 +0000 UTC	Normal	StatefulSet.apps operator-self-healing-haproxy		SuccessfulCreate	create Pod operator-self-healing-haproxy-1 in StatefulSet operator-self-healing-haproxy successful	statefulset-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:40 +0000 UTC	Normal	Pod operator-self-healing-haproxy-1.spec.initContainers{haproxy-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-825-808887c6"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:40 +0000 UTC	Normal	Pod operator-self-healing-haproxy-1.spec.initContainers{haproxy-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-825-808887c6" in 237ms (237ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:40 +0000 UTC	Normal	Pod operator-self-healing-haproxy-1.spec.initContainers{haproxy-init}		Created	Created container: haproxy-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:40 +0000 UTC	Normal	Pod operator-self-healing-haproxy-1.spec.initContainers{haproxy-init}		Started	Started container haproxy-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:41 +0000 UTC	Normal	Pod operator-self-healing-haproxy-1.spec.containers{haproxy}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:41 +0000 UTC	Normal	Pod operator-self-healing-haproxy-1.spec.containers{haproxy}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 217ms (217ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:41 +0000 UTC	Normal	Pod operator-self-healing-haproxy-1.spec.containers{haproxy}		Created	Created container: haproxy	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:41 +0000 UTC	Normal	Pod operator-self-healing-haproxy-1.spec.containers{haproxy}		Started	Started container haproxy	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:41 +0000 UTC	Normal	Pod operator-self-healing-haproxy-1.spec.containers{mysql-monit}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:42 +0000 UTC	Normal	Pod operator-self-healing-haproxy-1.spec.containers{mysql-monit}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 234ms (234ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:42 +0000 UTC	Normal	Pod operator-self-healing-haproxy-1.spec.containers{mysql-monit}		Created	Created container: mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:42 +0000 UTC	Normal	Pod operator-self-healing-haproxy-1.spec.containers{mysql-monit}		Started	Started container mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:42 +0000 UTC	Normal	Pod operator-self-healing-haproxy-2	Binding	Scheduled	Successfully assigned kuttl-test-loving-spider/operator-self-healing-haproxy-2 to gke-jen-ps-825-808887c6--default-pool-1749e6e7-z39c	default-scheduler	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:42 +0000 UTC	Normal	StatefulSet.apps operator-self-healing-haproxy		SuccessfulCreate	create Pod operator-self-healing-haproxy-2 in StatefulSet operator-self-healing-haproxy successful	statefulset-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:43 +0000 UTC	Normal	Pod operator-self-healing-haproxy-2.spec.initContainers{haproxy-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-825-808887c6"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:43 +0000 UTC	Normal	Pod operator-self-healing-haproxy-2.spec.initContainers{haproxy-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-825-808887c6" in 283ms (283ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:43 +0000 UTC	Normal	Pod operator-self-healing-haproxy-2.spec.initContainers{haproxy-init}		Created	Created container: haproxy-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:43 +0000 UTC	Normal	Pod operator-self-healing-haproxy-2.spec.initContainers{haproxy-init}		Started	Started container haproxy-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:43 +0000 UTC	Normal	Pod operator-self-healing-mysql-1		SuccessfulAttachVolume	AttachVolume.Attach succeeded for volume "pvc-e85f9dfb-afe3-45dd-9acb-fd39c026cadf" 	attachdetach-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:45 +0000 UTC	Normal	Pod operator-self-healing-haproxy-2.spec.containers{haproxy}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:45 +0000 UTC	Normal	Pod operator-self-healing-haproxy-2.spec.containers{haproxy}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 311ms (312ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:45 +0000 UTC	Normal	Pod operator-self-healing-haproxy-2.spec.containers{haproxy}		Created	Created container: haproxy	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:45 +0000 UTC	Normal	Pod operator-self-healing-mysql-1.spec.initContainers{mysql-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-825-808887c6"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:45 +0000 UTC	Normal	Pod operator-self-healing-mysql-1.spec.initContainers{mysql-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-825-808887c6" in 372ms (372ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:45 +0000 UTC	Normal	Pod operator-self-healing-mysql-1.spec.initContainers{mysql-init}		Created	Created container: mysql-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:46 +0000 UTC	Normal	Pod operator-self-healing-haproxy-2.spec.containers{haproxy}		Started	Started container haproxy	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:46 +0000 UTC	Normal	Pod operator-self-healing-haproxy-2.spec.containers{mysql-monit}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:46 +0000 UTC	Normal	Pod operator-self-healing-haproxy-2.spec.containers{mysql-monit}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 243ms (243ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:46 +0000 UTC	Normal	Pod operator-self-healing-haproxy-2.spec.containers{mysql-monit}		Created	Created container: mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:46 +0000 UTC	Normal	Pod operator-self-healing-haproxy-2.spec.containers{mysql-monit}		Started	Started container mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:46 +0000 UTC	Normal	Pod operator-self-healing-mysql-1.spec.initContainers{mysql-init}		Started	Started container mysql-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:47 +0000 UTC	Normal	Pod operator-self-healing-mysql-1.spec.containers{mysql}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-psmysql"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:47 +0000 UTC	Normal	Pod operator-self-healing-mysql-1.spec.containers{mysql}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 244ms (244ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:47 +0000 UTC	Normal	Pod operator-self-healing-mysql-1.spec.containers{mysql}		Created	Created container: mysql	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:48 +0000 UTC	Normal	Pod operator-self-healing-mysql-1.spec.containers{mysql}		Started	Started container mysql	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:48 +0000 UTC	Normal	Pod operator-self-healing-mysql-1.spec.containers{xtrabackup}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-backup"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:48 +0000 UTC	Normal	Pod operator-self-healing-mysql-1.spec.containers{xtrabackup}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 263ms (263ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:48 +0000 UTC	Normal	Pod operator-self-healing-mysql-1.spec.containers{xtrabackup}		Created	Created container: xtrabackup	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:48 +0000 UTC	Normal	Pod operator-self-healing-mysql-1.spec.containers{xtrabackup}		Started	Started container xtrabackup	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:48 +0000 UTC	Normal	Pod operator-self-healing-mysql-1.spec.containers{pt-heartbeat}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-toolkit"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:48 +0000 UTC	Normal	Pod operator-self-healing-mysql-1.spec.containers{pt-heartbeat}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 245ms (245ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:48 +0000 UTC	Normal	Pod operator-self-healing-mysql-1.spec.containers{pt-heartbeat}		Created	Created container: pt-heartbeat	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:48 +0000 UTC	Normal	Pod operator-self-healing-mysql-1.spec.containers{pt-heartbeat}		Started	Started container pt-heartbeat	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:53 +0000 UTC	Normal	Pod operator-self-healing-orc-2	Binding	Scheduled	Successfully assigned kuttl-test-loving-spider/operator-self-healing-orc-2 to gke-jen-ps-825-808887c6--default-pool-1749e6e7-lq6v	default-scheduler	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:53 +0000 UTC	Normal	StatefulSet.apps operator-self-healing-orc		SuccessfulCreate	create Pod operator-self-healing-orc-2 in StatefulSet operator-self-healing-orc successful	statefulset-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:54 +0000 UTC	Normal	Pod operator-self-healing-orc-2.spec.initContainers{orc-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-825-808887c6"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:54 +0000 UTC	Normal	Pod operator-self-healing-orc-2.spec.initContainers{orc-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-825-808887c6" in 266ms (266ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:54 +0000 UTC	Normal	Pod operator-self-healing-orc-2.spec.initContainers{orc-init}		Created	Created container: orc-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:54 +0000 UTC	Normal	Pod operator-self-healing-orc-2.spec.initContainers{orc-init}		Started	Started container orc-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:56 +0000 UTC	Normal	Pod operator-self-healing-orc-2.spec.containers{orc}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:56 +0000 UTC	Normal	Pod operator-self-healing-orc-2.spec.containers{orc}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 199ms (199ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:56 +0000 UTC	Normal	Pod operator-self-healing-orc-2.spec.containers{orc}		Created	Created container: orc	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:56 +0000 UTC	Normal	Pod operator-self-healing-orc-2.spec.containers{orc}		Started	Started container orc	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:56 +0000 UTC	Normal	Pod operator-self-healing-orc-2.spec.containers{mysql-monit}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:57 +0000 UTC	Normal	Pod operator-self-healing-orc-2.spec.containers{mysql-monit}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 212ms (212ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:57 +0000 UTC	Normal	Pod operator-self-healing-orc-2.spec.containers{mysql-monit}		Created	Created container: mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:39:57 +0000 UTC	Normal	Pod operator-self-healing-orc-2.spec.containers{mysql-monit}		Started	Started container mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:06 +0000 UTC	Warning	Pod operator-self-healing-mysql-1.spec.containers{mysql}		Unhealthy	Startup probe failed: 2025/03/11 17:40:05 Peers: [3361333965316331.operator-self-healing-mysql-unready.kuttl-test-loving-spider 3664383564313362.operator-self-healing-mysql-unready.kuttl-test-loving-spider]
        2025/03/11 17:40:05 FQDN: operator-self-healing-mysql-1.operator-self-healing-mysql.kuttl-test-loving-spider
        2025/03/11 17:40:05 Primary: operator-self-healing-mysql-0.operator-self-healing-mysql.kuttl-test-loving-spider Replicas: [operator-self-healing-mysql-1.operator-self-healing-mysql.kuttl-test-loving-spider]
        2025/03/11 17:40:05 lookup operator-self-healing-mysql-1 [10.193.56.36]
        2025/03/11 17:40:05 PodIP: 10.193.56.36
        2025/03/11 17:40:05 lookup operator-self-healing-mysql-0.operator-self-healing-mysql.kuttl-test-loving-spider [10.193.58.47]
        2025/03/11 17:40:05 PrimaryIP: 10.193.58.47
        2025/03/11 17:40:05 Donor: operator-self-healing-mysql-0.operator-self-healing-mysql.kuttl-test-loving-spider
        2025/03/11 17:40:05 Opening connection to 10.193.56.36
        2025/03/11 17:40:05 Clone required: true
        2025/03/11 17:40:05 Checking if a clone in progress
        2025/03/11 17:40:05 Clone in progress: false
        2025/03/11 17:40:05 Cloning from operator-self-healing-mysql-0.operator-self-healing-mysql.kuttl-test-loving-spider
        2025/03/11 17:40:06 Clone finished. Restarting container...
        	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:06 +0000 UTC	Normal	Pod operator-self-healing-mysql-1.spec.containers{mysql}		Killing	Container mysql failed startup probe, will be restarted	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:10 +0000 UTC	Normal	Pod operator-self-healing-mysql-1.spec.containers{mysql}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 218ms (218ms including waiting)	kubelet	
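The startup-probe failure and the Killing event for operator-self-healing-mysql-1 are the replica's normal join path, not a fault: the probe log shows the pod detecting mysql-0 as primary, cloning its data from that donor, and restarting the mysql container to come up on the cloned data set. A hedged sketch for inspecting that clone from inside the pod; the Secret name follows the operator's usual <cluster>-secrets convention and is an assumption, not something this log confirms:

        # Assumption: root credentials live in a Secret named
        # operator-self-healing-secrets under the key "root".
        PASS=$(kubectl -n kuttl-test-loving-spider get secret operator-self-healing-secrets \
          -o jsonpath='{.data.root}' | base64 -d)
        # performance_schema.clone_status is the stock MySQL 8 clone-plugin view.
        kubectl -n kuttl-test-loving-spider exec operator-self-healing-mysql-1 -c mysql -- \
          mysql -uroot -p"$PASS" -e 'SELECT STATE, SOURCE FROM performance_schema.clone_status\G'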
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:40 +0000 UTC	Normal	PersistentVolumeClaim datadir-operator-self-healing-mysql-2		WaitForFirstConsumer	waiting for first consumer to be created before binding	persistentvolume-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:40 +0000 UTC	Normal	PersistentVolumeClaim datadir-operator-self-healing-mysql-2		ExternalProvisioning	Waiting for a volume to be created either by the external provisioner 'pd.csi.storage.gke.io' or manually by the system administrator. If volume creation is delayed, please verify that the provisioner is running and correctly registered.	persistentvolume-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:40 +0000 UTC	Normal	PersistentVolumeClaim datadir-operator-self-healing-mysql-2		Provisioning	External provisioner is provisioning volume for claim "kuttl-test-loving-spider/datadir-operator-self-healing-mysql-2"	pd.csi.storage.gke.io_gke-39094cfe6a07481ba2cc-33d5-4745-vm_4de6ca94-5f3f-4aae-aa3d-3a176cb3a99b	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:40 +0000 UTC	Normal	StatefulSet.apps operator-self-healing-mysql		SuccessfulCreate	create Claim datadir-operator-self-healing-mysql-2 Pod operator-self-healing-mysql-2 in StatefulSet operator-self-healing-mysql success	statefulset-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:40 +0000 UTC	Normal	StatefulSet.apps operator-self-healing-mysql		SuccessfulCreate	create Pod operator-self-healing-mysql-2 in StatefulSet operator-self-healing-mysql successful	statefulset-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:44 +0000 UTC	Normal	PersistentVolumeClaim datadir-operator-self-healing-mysql-2		ProvisioningSucceeded	Successfully provisioned volume pvc-9f040442-a0f9-44ef-b700-94951211f920	pd.csi.storage.gke.io_gke-39094cfe6a07481ba2cc-33d5-4745-vm_4de6ca94-5f3f-4aae-aa3d-3a176cb3a99b	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:44 +0000 UTC	Normal	Pod operator-self-healing-mysql-2	Binding	Scheduled	Successfully assigned kuttl-test-loving-spider/operator-self-healing-mysql-2 to gke-jen-ps-825-808887c6--default-pool-1749e6e7-h9s3	default-scheduler	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:52 +0000 UTC	Normal	Pod operator-self-healing-mysql-2		SuccessfulAttachVolume	AttachVolume.Attach succeeded for volume "pvc-9f040442-a0f9-44ef-b700-94951211f920" 	attachdetach-controller	
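The datadir-operator-self-healing-mysql-2 lines show the full WaitForFirstConsumer flow on GKE: the claim waits for its pod to be scheduled, the pd.csi.storage.gke.io provisioner creates the PersistentVolume, and the attachdetach-controller attaches it to the chosen node. A sketch for replaying that sequence with stock kubectl, using only names that appear in the events:

        # Current phase and bound PV of the claim.
        kubectl -n kuttl-test-loving-spider get pvc datadir-operator-self-healing-mysql-2 \
          -o jsonpath='{.status.phase} {.spec.volumeName}'
        # The same provisioning events, ordered by time.
        kubectl -n kuttl-test-loving-spider get events \
          --field-selector involvedObject.name=datadir-operator-self-healing-mysql-2 \
          --sort-by=.lastTimestamp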
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:53 +0000 UTC	Normal	Pod operator-self-healing-mysql-2.spec.initContainers{mysql-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-825-808887c6"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:53 +0000 UTC	Normal	Pod operator-self-healing-mysql-2.spec.initContainers{mysql-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-825-808887c6" in 264ms (264ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:53 +0000 UTC	Normal	Pod operator-self-healing-mysql-2.spec.initContainers{mysql-init}		Created	Created container: mysql-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:53 +0000 UTC	Normal	Pod operator-self-healing-mysql-2.spec.initContainers{mysql-init}		Started	Started container mysql-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:55 +0000 UTC	Normal	Pod operator-self-healing-mysql-2.spec.containers{mysql}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-psmysql"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:56 +0000 UTC	Normal	Pod operator-self-healing-mysql-2.spec.containers{mysql}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 215ms (215ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:56 +0000 UTC	Normal	Pod operator-self-healing-mysql-2.spec.containers{mysql}		Created	Created container: mysql	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:56 +0000 UTC	Normal	Pod operator-self-healing-mysql-2.spec.containers{mysql}		Started	Started container mysql	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:56 +0000 UTC	Normal	Pod operator-self-healing-mysql-2.spec.containers{xtrabackup}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-backup"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:56 +0000 UTC	Normal	Pod operator-self-healing-mysql-2.spec.containers{xtrabackup}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 218ms (218ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:56 +0000 UTC	Normal	Pod operator-self-healing-mysql-2.spec.containers{xtrabackup}		Created	Created container: xtrabackup	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:56 +0000 UTC	Normal	Pod operator-self-healing-mysql-2.spec.containers{xtrabackup}		Started	Started container xtrabackup	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:56 +0000 UTC	Normal	Pod operator-self-healing-mysql-2.spec.containers{pt-heartbeat}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-toolkit"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:56 +0000 UTC	Normal	Pod operator-self-healing-mysql-2.spec.containers{pt-heartbeat}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 245ms (245ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:56 +0000 UTC	Normal	Pod operator-self-healing-mysql-2.spec.containers{pt-heartbeat}		Created	Created container: pt-heartbeat	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:40:56 +0000 UTC	Normal	Pod operator-self-healing-mysql-2.spec.containers{pt-heartbeat}		Started	Started container pt-heartbeat	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:41:14 +0000 UTC	Warning	Pod operator-self-healing-mysql-2.spec.containers{mysql}		Unhealthy	Startup probe failed: 2025/03/11 17:41:13 Peers: [3361333965316331.operator-self-healing-mysql-unready.kuttl-test-loving-spider 3664383564313362.operator-self-healing-mysql-unready.kuttl-test-loving-spider 3734653539646132.operator-self-healing-mysql-unready.kuttl-test-loving-spider]
        2025/03/11 17:41:13 FQDN: operator-self-healing-mysql-2.operator-self-healing-mysql.kuttl-test-loving-spider
        2025/03/11 17:41:13 Primary: operator-self-healing-mysql-0.operator-self-healing-mysql.kuttl-test-loving-spider Replicas: [operator-self-healing-mysql-1.operator-self-healing-mysql.kuttl-test-loving-spider operator-self-healing-mysql-2.operator-self-healing-mysql.kuttl-test-loving-spider]
        2025/03/11 17:41:13 lookup operator-self-healing-mysql-2 [10.193.57.32]
        2025/03/11 17:41:13 PodIP: 10.193.57.32
        2025/03/11 17:41:13 lookup operator-self-healing-mysql-0.operator-self-healing-mysql.kuttl-test-loving-spider [10.193.58.47]
        2025/03/11 17:41:13 PrimaryIP: 10.193.58.47
        2025/03/11 17:41:13 Donor: operator-self-healing-mysql-1.operator-self-healing-mysql.kuttl-test-loving-spider
        2025/03/11 17:41:13 Opening connection to 10.193.57.32
        2025/03/11 17:41:13 Clone required: true
        2025/03/11 17:41:13 Checking if a clone in progress
        2025/03/11 17:41:13 Clone in progress: false
        2025/03/11 17:41:13 Cloning from operator-self-healing-mysql-1.operator-self-healing-mysql.kuttl-test-loving-spider
        2025/03/11 17:41:14 Clone finished. Restarting container...
        	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:41:14 +0000 UTC	Normal	Pod operator-self-healing-mysql-2.spec.containers{mysql}		Killing	Container mysql failed startup probe, will be restarted	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:41:17 +0000 UTC	Normal	Pod operator-self-healing-mysql-2.spec.containers{mysql}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 206ms (206ms including waiting)	kubelet	
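operator-self-healing-mysql-2 repeats the same clone-then-restart bootstrap, with one detail worth noting in the probe output: its donor is mysql-1 rather than the primary mysql-0. The restart itself is driven by the mysql container's startup probe, which can be read straight off the pod spec:

        # Sketch: show the startup probe behind the "failed startup probe,
        # will be restarted" events; plain kubectl jsonpath filter.
        kubectl -n kuttl-test-loving-spider get pod operator-self-healing-mysql-2 \
          -o jsonpath='{.spec.containers[?(@.name=="mysql")].startupProbe}'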
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:39 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3	Binding	Scheduled	Successfully assigned kuttl-test-loving-spider/operator-self-healing-haproxy-3 to gke-jen-ps-825-808887c6--default-pool-1749e6e7-lq6v	default-scheduler	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:39 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-825-808887c6"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:39 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-825-808887c6" in 281ms (281ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:39 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init}		Created	Created container: haproxy-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:39 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init}		Started	Started container haproxy-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:39 +0000 UTC	Normal	StatefulSet.apps operator-self-healing-haproxy		SuccessfulCreate	create Pod operator-self-healing-haproxy-3 in StatefulSet operator-self-healing-haproxy successful	statefulset-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:41 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.containers{haproxy}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:42 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.containers{haproxy}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 191ms (191ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:42 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.containers{haproxy}		Created	Created container: haproxy	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:42 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.containers{haproxy}		Started	Started container haproxy	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:42 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:42 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 240ms (240ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:42 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit}		Created	Created container: mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:42 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit}		Started	Started container mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:43 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4	Binding	Scheduled	Successfully assigned kuttl-test-loving-spider/operator-self-healing-haproxy-4 to gke-jen-ps-825-808887c6--default-pool-1749e6e7-h9s3	default-scheduler	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:43 +0000 UTC	Normal	StatefulSet.apps operator-self-healing-haproxy		SuccessfulCreate	create Pod operator-self-healing-haproxy-4 in StatefulSet operator-self-healing-haproxy successful	statefulset-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:44 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-825-808887c6"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:44 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-825-808887c6" in 289ms (289ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:44 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init}		Created	Created container: haproxy-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:44 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init}		Started	Started container haproxy-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:46 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.containers{haproxy}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:46 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.containers{haproxy}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 234ms (234ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:46 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.containers{haproxy}		Created	Created container: haproxy	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:46 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.containers{haproxy}		Started	Started container haproxy	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:46 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:46 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 223ms (223ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:46 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit}		Created	Created container: mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:42:46 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit}		Started	Started container mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:44:42 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.containers{haproxy}		Killing	Stopping container haproxy	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:44:42 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit}		Killing	Stopping container mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:44:42 +0000 UTC	Normal	StatefulSet.apps operator-self-healing-haproxy		SuccessfulDelete	delete Pod operator-self-healing-haproxy-4 in StatefulSet operator-self-healing-haproxy successful	statefulset-controller	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:44:43 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.containers{haproxy}		Killing	Stopping container haproxy	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:44:43 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit}		Killing	Stopping container mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:44:43 +0000 UTC	Normal	StatefulSet.apps operator-self-healing-haproxy		SuccessfulDelete	delete Pod operator-self-healing-haproxy-3 in StatefulSet operator-self-healing-haproxy successful	statefulset-controller	
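Pods operator-self-healing-haproxy-3 and -4, created at 17:42, are deleted again at 17:44: the HAProxy StatefulSet is scaled from five replicas back to three, and the statefulset-controller removes the highest ordinals first. A hedged sketch of the kind of change that produces these events; the ps short name and the spec.proxy.haproxy.size path follow the operator's published CRD and are assumptions, not taken from this log:

        # Assumption: the PerconaServerMySQL CR is named after the cluster and
        # exposes the HAProxy replica count at spec.proxy.haproxy.size.
        kubectl -n kuttl-test-loving-spider patch ps operator-self-healing --type=merge \
          -p '{"spec":{"proxy":{"haproxy":{"size":3}}}}'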
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:07 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3	Binding	Scheduled	Successfully assigned kuttl-test-loving-spider/operator-self-healing-haproxy-3 to gke-jen-ps-825-808887c6--default-pool-1749e6e7-lq6v	default-scheduler	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:08 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-825-808887c6"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:08 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-825-808887c6" in 284ms (284ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:08 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init}		Created	Created container: haproxy-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:08 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init}		Started	Started container haproxy-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:09 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.containers{haproxy}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:09 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.containers{haproxy}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 238ms (238ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:09 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.containers{haproxy}		Created	Created container: haproxy	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:09 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.containers{haproxy}		Started	Started container haproxy	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:10 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:10 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 223ms (223ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:10 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit}		Created	Created container: mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:10 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit}		Started	Started container mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:10 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4	Binding	Scheduled	Successfully assigned kuttl-test-loving-spider/operator-self-healing-haproxy-4 to gke-jen-ps-825-808887c6--default-pool-1749e6e7-h9s3	default-scheduler	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:11 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:PR-825-808887c6"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:11 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:PR-825-808887c6" in 251ms (251ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:11 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init}		Created	Created container: haproxy-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:11 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init}		Started	Started container haproxy-init	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:13 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.containers{haproxy}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:13 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.containers{haproxy}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 239ms (239ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:13 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.containers{haproxy}		Created	Created container: haproxy	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:14 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.containers{haproxy}		Started	Started container haproxy	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:14 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit}		Pulling	Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:14 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit}		Pulled	Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 179ms (179ms including waiting)	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:14 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit}		Created	Created container: mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:14 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit}		Started	Started container mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:20 +0000 UTC	Normal	Pod chaos-controller-manager-657db9f6b4-jvlp7.spec.containers{chaos-mesh}		Killing	Stopping container chaos-mesh	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:20 +0000 UTC	Normal	Pod chaos-controller-manager-657db9f6b4-pcmwj.spec.containers{chaos-mesh}		Killing	Stopping container chaos-mesh	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:20 +0000 UTC	Normal	Pod chaos-controller-manager-657db9f6b4-shk6p.spec.containers{chaos-mesh}		Killing	Stopping container chaos-mesh	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:21 +0000 UTC	Normal	Pod chaos-daemon-7d7f8.spec.containers{chaos-daemon}		Killing	Stopping container chaos-daemon	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:21 +0000 UTC	Normal	Pod chaos-daemon-qszd6.spec.containers{chaos-daemon}		Killing	Stopping container chaos-daemon	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:46:21 +0000 UTC	Normal	Pod chaos-daemon-zmjrt.spec.containers{chaos-daemon}		Killing	Stopping container chaos-daemon	kubelet	
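The chaos-controller-manager and chaos-daemon pods stopping at 17:46 mark the teardown of the Chaos Mesh installation the test used for fault injection. A hedged sketch of a typical cleanup; the Helm release name and namespace are assumptions, since the log only shows the pods being killed:

        # Assumption: Chaos Mesh was installed as a Helm release named
        # chaos-mesh into the test namespace; adjust to the actual deployment.
        helm uninstall chaos-mesh --namespace kuttl-test-loving-spider
        kubectl get crd -o name | grep 'chaos-mesh.org' | xargs -r kubectl delete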
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:05 +0000 UTC	Normal	Pod operator-self-healing-haproxy-0.spec.containers{haproxy}		Killing	Stopping container haproxy	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:05 +0000 UTC	Normal	Pod operator-self-healing-haproxy-1.spec.containers{haproxy}		Killing	Stopping container haproxy	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:05 +0000 UTC	Normal	Pod operator-self-healing-haproxy-1.spec.containers{mysql-monit}		Killing	Stopping container mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:05 +0000 UTC	Normal	Pod operator-self-healing-haproxy-2.spec.containers{haproxy}		Killing	Stopping container haproxy	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:05 +0000 UTC	Normal	Pod operator-self-healing-haproxy-2.spec.containers{mysql-monit}		Killing	Stopping container mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:05 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.containers{haproxy}		Killing	Stopping container haproxy	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:05 +0000 UTC	Normal	Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit}		Killing	Stopping container mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:05 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.containers{haproxy}		Killing	Stopping container haproxy	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:05 +0000 UTC	Normal	Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit}		Killing	Stopping container mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:06 +0000 UTC	Normal	Pod operator-self-healing-haproxy-0.spec.containers{mysql-monit}		Killing	Stopping container mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:06 +0000 UTC	Normal	Pod operator-self-healing-mysql-0.spec.containers{mysql}		Killing	Stopping container mysql	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:06 +0000 UTC	Normal	Pod operator-self-healing-mysql-0.spec.containers{xtrabackup}		Killing	Stopping container xtrabackup	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:06 +0000 UTC	Normal	Pod operator-self-healing-mysql-0.spec.containers{pt-heartbeat}		Killing	Stopping container pt-heartbeat	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:06 +0000 UTC	Normal	Pod operator-self-healing-mysql-1.spec.containers{xtrabackup}		Killing	Stopping container xtrabackup	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:06 +0000 UTC	Normal	Pod operator-self-healing-mysql-1.spec.containers{mysql}		Killing	Stopping container mysql	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:06 +0000 UTC	Normal	Pod operator-self-healing-mysql-1.spec.containers{pt-heartbeat}		Killing	Stopping container pt-heartbeat	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:06 +0000 UTC	Normal	Pod operator-self-healing-mysql-2.spec.containers{xtrabackup}		Killing	Stopping container xtrabackup	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:06 +0000 UTC	Normal	Pod operator-self-healing-mysql-2.spec.containers{mysql}		Killing	Stopping container mysql	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:06 +0000 UTC	Normal	Pod operator-self-healing-mysql-2.spec.containers{pt-heartbeat}		Killing	Stopping container pt-heartbeat	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:06 +0000 UTC	Normal	Pod operator-self-healing-orc-0.spec.containers{orc}		Killing	Stopping container orc	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:06 +0000 UTC	Normal	Pod operator-self-healing-orc-0.spec.containers{mysql-monit}		Killing	Stopping container mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:06 +0000 UTC	Normal	Pod operator-self-healing-orc-1.spec.containers{orc}		Killing	Stopping container orc	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:06 +0000 UTC	Normal	Pod operator-self-healing-orc-1.spec.containers{mysql-monit}		Killing	Stopping container mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:06 +0000 UTC	Normal	Pod operator-self-healing-orc-2.spec.containers{orc}		Killing	Stopping container orc	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:06 +0000 UTC	Normal	Pod operator-self-healing-orc-2.spec.containers{mysql-monit}		Killing	Stopping container mysql-monit	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:07 +0000 UTC	Warning	Pod operator-self-healing-mysql-0.spec.containers{mysql}		Unhealthy	Readiness probe failed: 2025/03/11 17:47:07 readiness check failed: connect to db: ping DB: dial tcp 10.193.58.47:33062: connect: connection refused	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:08 +0000 UTC	Warning	Pod operator-self-healing-mysql-2.spec.containers{mysql}		Unhealthy	Readiness probe failed: 2025/03/11 17:47:08 readiness check failed: connect to db: ping DB: dial tcp 10.193.57.32:33062: connect: connection refused	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:10 +0000 UTC	Warning	Pod operator-self-healing-mysql-1.spec.containers{mysql}		Unhealthy	Readiness probe failed: 2025/03/11 17:47:10 readiness check failed: connect to db: ping DB: dial tcp 10.193.56.36:33062: connect: connection refused	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | 2025-03-11 17:47:12 +0000 UTC	Warning	Pod operator-self-healing-mysql-0.spec.containers{mysql}		Unhealthy	Readiness probe failed: 2025/03/11 17:47:12 readiness check failed: connect to db: ping DB: dial tcp 10.193.58.47:33062: connect: connection refused	kubelet	
    logger.go:42: 17:47:13 | operator-self-healing | Deleting namespace: kuttl-test-loving-spider
=== NAME  kuttl
    harness.go:407: run tests finished
    harness.go:515: cleaning up
    harness.go:572: removing temp folder: ""
--- PASS: kuttl (629.38s)
    --- PASS: kuttl/harness (0.00s)
        --- PASS: kuttl/harness/operator-self-healing (628.94s)
PASS