=== RUN kuttl
harness.go:462: starting setup
harness.go:252: running tests using configured kubeconfig.
harness.go:275: Successful connection to cluster at: https://35.232.42.60
harness.go:360: running tests
harness.go:73: going to run test suite with timeout of 180 seconds for each step
harness.go:372: testsuite: e2e-tests/tests has 28 tests
=== RUN kuttl/harness
=== RUN kuttl/harness/operator-self-healing
=== PAUSE kuttl/harness/operator-self-healing
=== CONT kuttl/harness/operator-self-healing
logger.go:42: 14:55:11 | operator-self-healing | Creating namespace: kuttl-test-creative-silkworm
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | starting test step 0-deploy-operator
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
deploy_operator
deploy_non_tls_cluster_secrets
deploy_tls_cluster_secrets
deploy_client]
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | + source ../../functions
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ realpath ../../..
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/vars.sh
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | ++++ mktemp -d
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ export TEMP_DIR=/tmp/tmp.6YVlRMMxm4
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ TEMP_DIR=/tmp/tmp.6YVlRMMxm4
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ export GIT_BRANCH=PR-424
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ GIT_BRANCH=PR-424
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ export VERSION=PR-424-70568ae
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ VERSION=PR-424-70568ae
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-424-70568ae
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-424-70568ae
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ export IMAGE_PMM=perconalab/pmm-client:dev-latest
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ IMAGE_PMM=perconalab/pmm-client:dev-latest
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ export IMAGE_PMM_SERVER_REPO=perconalab/pmm-server
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ IMAGE_PMM_SERVER_REPO=perconalab/pmm-server
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ export IMAGE_PMM_SERVER_TAG=dev-latest
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ IMAGE_PMM_SERVER_TAG=dev-latest
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | ++++ which gdate
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-424/bin/:/home/ec2-user/google-cloud-sdk/bin:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | ++++ which date
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ date=/usr/bin/date
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ command -v oc
logger.go:42: 14:55:11 | operator-self-healing/0-deploy-operator | +++ oc get projects
logger.go:42: 14:55:17 | operator-self-healing/0-deploy-operator | error: the server doesn't have a resource type "projects"
logger.go:42: 14:55:17 | operator-self-healing/0-deploy-operator | +++ grep '^minikube'
logger.go:42: 14:55:17 | operator-self-healing/0-deploy-operator | +++ kubectl get nodes
logger.go:42: 14:55:17 | operator-self-healing/0-deploy-operator | ++++ pwd
logger.go:42: 14:55:17 | operator-self-healing/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/tests/operator-self-healing
logger.go:42: 14:55:17 | operator-self-healing/0-deploy-operator | ++ test_name=operator-self-healing
logger.go:42: 14:55:17 | operator-self-healing/0-deploy-operator | + deploy_operator
logger.go:42: 14:55:17 | operator-self-healing/0-deploy-operator | + kubectl -n kuttl-test-creative-silkworm apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy/crd.yaml
logger.go:42: 14:55:19 | operator-self-healing/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
logger.go:42: 14:55:19 | operator-self-healing/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
logger.go:42: 14:55:20 | operator-self-healing/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
logger.go:42: 14:55:20 | operator-self-healing/0-deploy-operator | + kubectl -n kuttl-test-creative-silkworm apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy/rbac.yaml
logger.go:42: 14:55:21 | operator-self-healing/0-deploy-operator | serviceaccount/percona-server-mysql-operator created
logger.go:42: 14:55:21 | operator-self-healing/0-deploy-operator | serviceaccount/percona-server-mysql-operator-orchestrator created
logger.go:42: 14:55:21 | operator-self-healing/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 14:55:22 | operator-self-healing/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator created
logger.go:42: 14:55:22 | operator-self-healing/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-orchestrator created
logger.go:42: 14:55:22 | operator-self-healing/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 14:55:23 | operator-self-healing/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator created
logger.go:42: 14:55:23 | operator-self-healing/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-orchestrator created
logger.go:42: 14:55:23 | operator-self-healing/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"'
logger.go:42: 14:55:23 | operator-self-healing/0-deploy-operator | + kubectl -n kuttl-test-creative-silkworm apply -f -
logger.go:42: 14:55:23 | operator-self-healing/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-424-70568ae
logger.go:42: 14:55:23 | operator-self-healing/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-424-70568ae"' /mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy/operator.yaml
logger.go:42: 14:55:24 | operator-self-healing/0-deploy-operator | configmap/percona-server-mysql-operator-config created
logger.go:42: 14:55:25 | operator-self-healing/0-deploy-operator | deployment.apps/percona-server-mysql-operator created
logger.go:42: 14:55:25 | operator-self-healing/0-deploy-operator | + deploy_non_tls_cluster_secrets
logger.go:42: 14:55:25 | operator-self-healing/0-deploy-operator | + kubectl -n kuttl-test-creative-silkworm apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf/secrets.yaml
logger.go:42: 14:55:26 | operator-self-healing/0-deploy-operator | secret/test-secrets created
logger.go:42: 14:55:26 | operator-self-healing/0-deploy-operator | + deploy_tls_cluster_secrets
logger.go:42: 14:55:26 | operator-self-healing/0-deploy-operator | + kubectl -n kuttl-test-creative-silkworm apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf/ssl-secret.yaml
logger.go:42: 14:55:27 | operator-self-healing/0-deploy-operator | secret/test-ssl created
logger.go:42: 14:55:27 | operator-self-healing/0-deploy-operator | + deploy_client
logger.go:42: 14:55:27 | operator-self-healing/0-deploy-operator | + kubectl -n kuttl-test-creative-silkworm apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf/client.yaml
logger.go:42: 14:55:28 | operator-self-healing/0-deploy-operator | pod/mysql-client created
logger.go:42: 14:55:30 | operator-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 14:55:30 | operator-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 14:55:31 | operator-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 14:55:32 | operator-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 14:55:32 | operator-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 14:55:33 | operator-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 14:55:34 | operator-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 14:55:34 | operator-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 14:55:35 | operator-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 14:55:36 | operator-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 14:55:36 | operator-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 14:55:37 | operator-self-healing/0-deploy-operator | INFO Found 1 resource(s).
logger.go:42: 14:55:37 | operator-self-healing/0-deploy-operator | NAME NAMESPACE COL0
logger.go:42: 14:55:37 | operator-self-healing/0-deploy-operator | percona-server-mysql-operator kuttl-test-creative-silkworm 1
logger.go:42: 14:55:37 | operator-self-healing/0-deploy-operator | ASSERT PASS
logger.go:42: 14:55:37 | operator-self-healing/0-deploy-operator | test step completed 0-deploy-operator
logger.go:42: 14:55:37 | operator-self-healing/1-deploy-chaos-mesh | starting test step 1-deploy-chaos-mesh
logger.go:42: 14:55:37 | operator-self-healing/1-deploy-chaos-mesh | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
deploy_chaos_mesh]
logger.go:42: 14:55:37 | operator-self-healing/1-deploy-chaos-mesh | + source ../../functions
logger.go:42: 14:55:37 | operator-self-healing/1-deploy-chaos-mesh | +++ realpath ../../..
[... environment setup trace identical to step 0 ...]
logger.go:42: 14:55:43 | operator-self-healing/1-deploy-chaos-mesh | + deploy_chaos_mesh
logger.go:42: 14:55:43 | operator-self-healing/1-deploy-chaos-mesh | + destroy_chaos_mesh
logger.go:42: 14:55:43 | operator-self-healing/1-deploy-chaos-mesh | ++ helm list --all-namespaces --filter chaos-mesh
logger.go:42: 14:55:43 | operator-self-healing/1-deploy-chaos-mesh | ++ tail -n1
logger.go:42: 14:55:43 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '-F ' '{print $2}'
logger.go:42: 14:55:43 | operator-self-healing/1-deploy-chaos-mesh | ++ sed s/NAMESPACE//
logger.go:42: 14:55:44 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-424/kubeconfig
logger.go:42: 14:55:44 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-424/kubeconfig
logger.go:42: 14:55:44 | operator-self-healing/1-deploy-chaos-mesh | + local chaos_mesh_ns=
logger.go:42: 14:55:44 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl api-resources
logger.go:42: 14:55:44 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
logger.go:42: 14:55:44 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 14:55:45 | operator-self-healing/1-deploy-chaos-mesh | + '[' -n '' ']'
logger.go:42: 14:55:45 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get crd
logger.go:42: 14:55:45 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh.org
logger.go:42: 14:55:45 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 14:55:46 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete crd
logger.go:42: 14:55:47 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
logger.go:42: 14:55:47 | operator-self-healing/1-deploy-chaos-mesh | + :
logger.go:42: 14:55:47 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get clusterrolebinding
logger.go:42: 14:55:47 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
logger.go:42: 14:55:47 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 14:55:48 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete clusterrolebinding
logger.go:42: 14:55:48 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
logger.go:42: 14:55:48 | operator-self-healing/1-deploy-chaos-mesh | + :
logger.go:42: 14:55:48 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get clusterrole
logger.go:42: 14:55:48 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
logger.go:42: 14:55:48 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 14:55:49 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete clusterrole
logger.go:42: 14:55:49 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
logger.go:42: 14:55:49 | operator-self-healing/1-deploy-chaos-mesh | + :
logger.go:42: 14:55:49 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get MutatingWebhookConfiguration
logger.go:42: 14:55:49 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
logger.go:42: 14:55:49 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 14:55:50 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete MutatingWebhookConfiguration
logger.go:42: 14:55:51 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
logger.go:42: 14:55:51 | operator-self-healing/1-deploy-chaos-mesh | + :
logger.go:42: 14:55:51 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration
logger.go:42: 14:55:51 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
logger.go:42: 14:55:51 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 14:55:51 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration
logger.go:42: 14:55:52 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
logger.go:42: 14:55:52 | operator-self-healing/1-deploy-chaos-mesh | + :
logger.go:42: 14:55:52 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration
logger.go:42: 14:55:52 | operator-self-healing/1-deploy-chaos-mesh | ++ grep validate-auth
logger.go:42: 14:55:52 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 14:55:52 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration
logger.go:42: 14:55:53 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
logger.go:42: 14:55:53 | operator-self-healing/1-deploy-chaos-mesh | + :
logger.go:42: 14:55:53 | operator-self-healing/1-deploy-chaos-mesh | + helm repo add chaos-mesh https://charts.chaos-mesh.org
logger.go:42: 14:55:53 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-424/kubeconfig
logger.go:42: 14:55:53 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-424/kubeconfig
logger.go:42: 14:55:53 | operator-self-healing/1-deploy-chaos-mesh | "chaos-mesh" has been added to your repositories
logger.go:42: 14:55:53 | operator-self-healing/1-deploy-chaos-mesh | + helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=kuttl-test-creative-silkworm --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.5.1
logger.go:42: 14:55:53 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-424/kubeconfig
logger.go:42: 14:55:53 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-424/kubeconfig
logger.go:42: 14:56:21 | operator-self-healing/1-deploy-chaos-mesh | NAME: chaos-mesh
logger.go:42: 14:56:21 | operator-self-healing/1-deploy-chaos-mesh | LAST DEPLOYED: Thu Aug 17 14:56:07 2023
logger.go:42: 14:56:21 | operator-self-healing/1-deploy-chaos-mesh | NAMESPACE: kuttl-test-creative-silkworm
logger.go:42: 14:56:21 | operator-self-healing/1-deploy-chaos-mesh | STATUS: deployed
logger.go:42: 14:56:21 | operator-self-healing/1-deploy-chaos-mesh | REVISION: 1
logger.go:42: 14:56:21 | operator-self-healing/1-deploy-chaos-mesh | TEST SUITE: None
logger.go:42: 14:56:21 | operator-self-healing/1-deploy-chaos-mesh | NOTES:
logger.go:42: 14:56:21 | operator-self-healing/1-deploy-chaos-mesh | 1. Make sure chaos-mesh components are running
logger.go:42: 14:56:21 | operator-self-healing/1-deploy-chaos-mesh | kubectl get pods --namespace kuttl-test-creative-silkworm -l app.kubernetes.io/instance=chaos-mesh
logger.go:42: 14:56:21 | operator-self-healing/1-deploy-chaos-mesh | + sleep 10
logger.go:42: 14:56:35 | operator-self-healing/1-deploy-chaos-mesh | test step completed 1-deploy-chaos-mesh
logger.go:42: 14:56:35 | operator-self-healing/2-create-cluster | starting test step 2-create-cluster
logger.go:42: 14:56:35 | operator-self-healing/2-create-cluster | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
get_cr \
| yq eval '.spec.mysql.clusterType="async"' - \
| yq eval '.spec.mysql.size=3' - \
| yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \
| yq eval '.spec.proxy.haproxy.enabled=true' - \
| yq eval '.spec.proxy.haproxy.size=3' - \
| yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - \
| yq eval '.spec.orchestrator.enabled=true' - \
| yq eval '.spec.orchestrator.size=3' - \
| yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \
| kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 14:56:35 | operator-self-healing/2-create-cluster | + source ../../functions
logger.go:42: 14:56:35 | operator-self-healing/2-create-cluster | +++ realpath ../../..
[... environment setup trace identical to step 0 ...]
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + get_cr
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + local name_suffix=
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval .spec.mysql.size=3 -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval .spec.proxy.haproxy.size=3 -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval '.spec.secretsName="test-secrets"' -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval .spec.orchestrator.enabled=true -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + '[' -n '' ']'
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval .spec.proxy.haproxy.enabled=true -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-424-70568ae
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-424-70568ae"' -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | ++ printf '.metadata.name="%s"' operator-self-healing
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval '.metadata.name="operator-self-healing"' /mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy/cr.yaml
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval .spec.orchestrator.size=3 -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + kubectl -n kuttl-test-creative-silkworm apply -f -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:dev-latest
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:dev-latest"' -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' -
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 14:56:42 | operator-self-healing/2-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
logger.go:42: 14:56:46 | operator-self-healing/2-create-cluster | perconaservermysql.ps.percona.com/operator-self-healing created
logger.go:42: 15:00:01 | operator-self-healing/2-create-cluster | test step completed 2-create-cluster
logger.go:42: 15:00:01 | operator-self-healing/3-write-data | starting test step 3-write-data
logger.go:42: 15:00:01 | operator-self-healing/3-write-data | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
run_mysql \
"CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" \
"-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password"
run_mysql \
"INSERT myDB.myTable (id) VALUES (100500)" \
"-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password"]
logger.go:42: 15:00:01 | operator-self-healing/3-write-data | + source ../../functions
[... environment setup trace identical to step 0 ...]
logger.go:42: 15:00:08 | operator-self-healing/3-write-data | +++ get_cluster_name
logger.go:42: 15:00:08 | operator-self-healing/3-write-data | +++ kubectl -n kuttl-test-creative-silkworm get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 15:00:09 | operator-self-healing/3-write-data | ++ get_haproxy_svc operator-self-healing
logger.go:42: 15:00:09 | operator-self-healing/3-write-data | ++ local cluster=operator-self-healing
logger.go:42: 15:00:09 | operator-self-healing/3-write-data | ++ echo operator-self-healing-haproxy
logger.go:42: 15:00:09 | operator-self-healing/3-write-data | + run_mysql 'CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' '-h operator-self-healing-haproxy -uroot -proot_password'
logger.go:42: 15:00:09 | operator-self-healing/3-write-data | + local 'command=CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)'
logger.go:42: 15:00:09 | operator-self-healing/3-write-data | + local 'uri=-h operator-self-healing-haproxy -uroot -proot_password'
logger.go:42: 15:00:09 | operator-self-healing/3-write-data | + local pod=
logger.go:42: 15:00:09 | operator-self-healing/3-write-data | ++ get_client_pod
logger.go:42: 15:00:09 | operator-self-healing/3-write-data | ++ kubectl -n kuttl-test-creative-silkworm get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 15:00:10 | operator-self-healing/3-write-data | + client_pod=mysql-client
logger.go:42: 15:00:10 | operator-self-healing/3-write-data | + wait_pod mysql-client
logger.go:42: 15:00:10 | operator-self-healing/3-write-data | + local pod=mysql-client
logger.go:42: 15:00:10 | operator-self-healing/3-write-data | + set +o xtrace
logger.go:42: 15:00:11 | operator-self-healing/3-write-data | mysql-clienttrue
logger.go:42: 15:00:11 | operator-self-healing/3-write-data | + sed -e 's/mysql: //'
logger.go:42: 15:00:11 | operator-self-healing/3-write-data | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 15:00:11 | operator-self-healing/3-write-data | + kubectl -n kuttl-test-creative-silkworm exec mysql-client -- bash -c 'printf '\''%s\n'\'' "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" | mysql -sN -h operator-self-healing-haproxy -uroot -proot_password'
logger.go:42: 15:00:12 | operator-self-healing/3-write-data | + :
logger.go:42: 15:00:12 | operator-self-healing/3-write-data | +++ get_cluster_name
logger.go:42: 15:00:12 | operator-self-healing/3-write-data | +++ kubectl -n kuttl-test-creative-silkworm get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 15:00:13 | operator-self-healing/3-write-data | ++ get_haproxy_svc operator-self-healing
logger.go:42: 15:00:13 | operator-self-healing/3-write-data | ++ local cluster=operator-self-healing
logger.go:42: 15:00:13 | operator-self-healing/3-write-data | ++ echo operator-self-healing-haproxy
logger.go:42: 15:00:13 | operator-self-healing/3-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100500)' '-h operator-self-healing-haproxy -uroot -proot_password'
logger.go:42: 15:00:13 | operator-self-healing/3-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100500)'
logger.go:42: 15:00:13 | operator-self-healing/3-write-data | + local 'uri=-h operator-self-healing-haproxy -uroot -proot_password'
logger.go:42: 15:00:13 | operator-self-healing/3-write-data | + local pod=
logger.go:42: 15:00:13 | operator-self-healing/3-write-data | ++ get_client_pod
logger.go:42: 15:00:13 | operator-self-healing/3-write-data | ++ kubectl -n kuttl-test-creative-silkworm get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 15:00:14 | operator-self-healing/3-write-data | + client_pod=mysql-client
logger.go:42: 15:00:14 | operator-self-healing/3-write-data | + wait_pod mysql-client
logger.go:42: 15:00:14 | operator-self-healing/3-write-data | + local pod=mysql-client
logger.go:42: 15:00:14 | operator-self-healing/3-write-data | + set +o xtrace
logger.go:42: 15:00:14 | operator-self-healing/3-write-data | mysql-clienttrue
logger.go:42: 15:00:14 | operator-self-healing/3-write-data | + kubectl -n kuttl-test-creative-silkworm exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100500)" | mysql -sN -h operator-self-healing-haproxy -uroot -proot_password'
logger.go:42: 15:00:14 | operator-self-healing/3-write-data | + sed -e 's/mysql: //'
logger.go:42: 15:00:14 | operator-self-healing/3-write-data | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 15:00:16 | operator-self-healing/3-write-data | + :
logger.go:42: 15:00:18 | operator-self-healing/3-write-data | test step completed 3-write-data
logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | starting test step 4-read-from-primary
logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | running command: [sh -c set -o errexit
set -o xtrace
source ../../functions
data=$(run_mysql "SELECT * FROM myDB.myTable" "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password")
kubectl create configmap -n "${NAMESPACE}" 04-read-from-primary --from-literal=data="${data}"]
logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | + source ../../functions
logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ realpath ../../..
logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424 logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/vars.sh logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424 logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424 logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | ++++ mktemp -d logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ export TEMP_DIR=/tmp/tmp.N8PzT7hawR logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ TEMP_DIR=/tmp/tmp.N8PzT7hawR logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ export GIT_BRANCH=PR-424 logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ GIT_BRANCH=PR-424 logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ export VERSION=PR-424-70568ae logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ VERSION=PR-424-70568ae logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-424-70568ae logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-424-70568ae logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:00:18 | 
operator-self-healing/4-read-from-primary | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ export IMAGE_PMM=perconalab/pmm-client:dev-latest logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ IMAGE_PMM=perconalab/pmm-client:dev-latest logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ export IMAGE_PMM_SERVER_REPO=perconalab/pmm-server logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ IMAGE_PMM_SERVER_REPO=perconalab/pmm-server logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ export IMAGE_PMM_SERVER_TAG=dev-latest logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ IMAGE_PMM_SERVER_TAG=dev-latest logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | ++++ which gdate logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-424/bin/:/home/ec2-user/google-cloud-sdk/bin:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | ++++ which date logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ date=/usr/bin/date logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ command -v oc logger.go:42: 15:00:18 | operator-self-healing/4-read-from-primary | +++ oc get projects logger.go:42: 15:00:23 | operator-self-healing/4-read-from-primary | error: the server doesn't have a resource type "projects" logger.go:42: 15:00:23 | operator-self-healing/4-read-from-primary | +++ grep '^minikube' logger.go:42: 15:00:23 | operator-self-healing/4-read-from-primary | +++ kubectl get nodes logger.go:42: 15:00:24 | operator-self-healing/4-read-from-primary | ++++ pwd logger.go:42: 15:00:24 | operator-self-healing/4-read-from-primary | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/tests/operator-self-healing logger.go:42: 15:00:24 | operator-self-healing/4-read-from-primary | ++ test_name=operator-self-healing logger.go:42: 15:00:24 | operator-self-healing/4-read-from-primary | ++++ get_cluster_name logger.go:42: 15:00:24 | operator-self-healing/4-read-from-primary | ++++ kubectl -n kuttl-test-creative-silkworm get ps -o 'jsonpath={.items[0].metadata.name}' logger.go:42: 15:00:25 | operator-self-healing/4-read-from-primary | +++ get_haproxy_svc operator-self-healing logger.go:42: 15:00:25 | operator-self-healing/4-read-from-primary | +++ local cluster=operator-self-healing logger.go:42: 15:00:25 | operator-self-healing/4-read-from-primary | +++ echo operator-self-healing-haproxy logger.go:42: 15:00:25 | 
logger.go:42: 15:00:25 | operator-self-healing/4-read-from-primary | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h operator-self-healing-haproxy -uroot -proot_password'
logger.go:42: 15:00:25 | operator-self-healing/4-read-from-primary | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 15:00:25 | operator-self-healing/4-read-from-primary | ++ local 'uri=-h operator-self-healing-haproxy -uroot -proot_password'
logger.go:42: 15:00:25 | operator-self-healing/4-read-from-primary | ++ local pod=
logger.go:42: 15:00:25 | operator-self-healing/4-read-from-primary | +++ get_client_pod
logger.go:42: 15:00:25 | operator-self-healing/4-read-from-primary | +++ kubectl -n kuttl-test-creative-silkworm get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 15:00:26 | operator-self-healing/4-read-from-primary | ++ client_pod=mysql-client
logger.go:42: 15:00:26 | operator-self-healing/4-read-from-primary | ++ wait_pod mysql-client
logger.go:42: 15:00:26 | operator-self-healing/4-read-from-primary | ++ local pod=mysql-client
logger.go:42: 15:00:26 | operator-self-healing/4-read-from-primary | ++ set +o xtrace
logger.go:42: 15:00:26 | operator-self-healing/4-read-from-primary | mysql-clienttrue
logger.go:42: 15:00:26 | operator-self-healing/4-read-from-primary | ++ kubectl -n kuttl-test-creative-silkworm exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h operator-self-healing-haproxy -uroot -proot_password'
logger.go:42: 15:00:26 | operator-self-healing/4-read-from-primary | ++ sed -e 's/mysql: //'
logger.go:42: 15:00:26 | operator-self-healing/4-read-from-primary | ++ grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 15:00:28 | operator-self-healing/4-read-from-primary | + data=100500
logger.go:42: 15:00:28 | operator-self-healing/4-read-from-primary | + kubectl create configmap -n kuttl-test-creative-silkworm 04-read-from-primary --from-literal=data=100500
logger.go:42: 15:00:29 | operator-self-healing/4-read-from-primary | configmap/04-read-from-primary created
logger.go:42: 15:00:30 | operator-self-healing/4-read-from-primary | test step completed 4-read-from-primary
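The read goes through the HAProxy service rather than a specific MySQL pod, so it lands wherever HAProxy currently routes the primary. Stripped of the helper plumbing, the query above amounts to this (names and credentials exactly as used in this run):

    # Run a query from the long-lived mysql-client pod through HAProxy and
    # drop the client's password warning from the output.
    kubectl -n kuttl-test-creative-silkworm exec mysql-client -- bash -c \
        'printf "%s\n" "SELECT * FROM myDB.myTable" | mysql -sN -h operator-self-healing-haproxy -uroot -proot_password' \
        | sed -e 's/mysql: //' \
        | grep -v 'Using a password on the command line interface can be insecure.'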
logger.go:42: 15:00:30 | operator-self-healing/5-kill-pod | starting test step 5-kill-pod
logger.go:42: 15:00:30 | operator-self-healing/5-kill-pod | running command: [sh -c set -o errexit
    set -o xtrace
    source ../../functions
    init_pod=$(get_operator_pod)
    kill_pods "${OPERATOR_NS:-$NAMESPACE}" "pod" "$init_pod" "" "operator"
    sleep 10 # wait a bit for pod to be killed
    wait_deployment percona-server-mysql-operator "${OPERATOR_NS:-$NAMESPACE}"
    if [ "$init_pod" == "$(get_operator_pod)" ]; then
        echo "operator pod was not killed! something went wrong."
        exit 1
    fi]
logger.go:42: 15:00:30 | operator-self-healing/5-kill-pod | + source ../../functions
logger.go:42: 15:00:30 | operator-self-healing/5-kill-pod | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/vars.sh
logger.go:42: 15:00:30 | operator-self-healing/5-kill-pod | +++ oc get projects
logger.go:42: 15:00:36 | operator-self-healing/5-kill-pod | error: the server doesn't have a resource type "projects"
logger.go:42: 15:00:36 | operator-self-healing/5-kill-pod | +++ grep '^minikube'
logger.go:42: 15:00:36 | operator-self-healing/5-kill-pod | +++ kubectl get nodes
logger.go:42: 15:00:37 | operator-self-healing/5-kill-pod | ++ test_name=operator-self-healing
logger.go:42: 15:00:37 | operator-self-healing/5-kill-pod | ++ get_operator_pod
logger.go:42: 15:00:37 | operator-self-healing/5-kill-pod | ++ kubectl get pods -n kuttl-test-creative-silkworm --selector=app.kubernetes.io/name=percona-server-mysql-operator -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 15:00:38 | operator-self-healing/5-kill-pod | + init_pod=percona-server-mysql-operator-6b56d66f99-jlcgp
logger.go:42: 15:00:38 | operator-self-healing/5-kill-pod | + kill_pods kuttl-test-creative-silkworm pod percona-server-mysql-operator-6b56d66f99-jlcgp '' operator
logger.go:42: 15:00:38 | operator-self-healing/5-kill-pod | + local ns=kuttl-test-creative-silkworm
logger.go:42: 15:00:38 | operator-self-healing/5-kill-pod | + local selector=pod
logger.go:42: 15:00:38 | operator-self-healing/5-kill-pod | + local pod_label=percona-server-mysql-operator-6b56d66f99-jlcgp
logger.go:42: 15:00:38 | operator-self-healing/5-kill-pod | + local label_value=
logger.go:42: 15:00:38 | operator-self-healing/5-kill-pod | + local chaos_suffix=operator
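kill_pods patches the e2e-tests/conf/chaos-pod-kill.yml template with yq (shown below) and applies it. The template itself never appears in the log; a plausible reconstruction of the applied object, where metadata.name and the selector come straight from the yq patch and the remaining fields are assumed from standard Chaos Mesh PodChaos usage:

    # Hypothetical reconstruction of the applied PodChaos experiment.
    # action/mode are assumed Chaos Mesh values, not taken from the log.
    kubectl apply --namespace kuttl-test-creative-silkworm -f - <<'EOF'
    apiVersion: chaos-mesh.org/v1alpha1
    kind: PodChaos
    metadata:
      name: chaos-pod-kill-operator
    spec:
      action: pod-kill
      mode: one
      selector:
        pods:
          kuttl-test-creative-silkworm:
            - percona-server-mysql-operator-6b56d66f99-jlcgp
    EOF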
logger.go:42: 15:00:38 | operator-self-healing/5-kill-pod | + '[' pod == pod ']'
logger.go:42: 15:00:38 | operator-self-healing/5-kill-pod | + yq eval '
logger.go:42: 15:00:38 | operator-self-healing/5-kill-pod | .metadata.name = "chaos-pod-kill-operator" |
logger.go:42: 15:00:38 | operator-self-healing/5-kill-pod | del(.spec.selector.pods.test-namespace) |
logger.go:42: 15:00:38 | operator-self-healing/5-kill-pod | .spec.selector.pods.kuttl-test-creative-silkworm[0] = "percona-server-mysql-operator-6b56d66f99-jlcgp"' /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf/chaos-pod-kill.yml
logger.go:42: 15:00:38 | operator-self-healing/5-kill-pod | + kubectl apply --namespace kuttl-test-creative-silkworm -f -
logger.go:42: 15:00:39 | operator-self-healing/5-kill-pod | podchaos.chaos-mesh.org/chaos-pod-kill-operator created
logger.go:42: 15:00:39 | operator-self-healing/5-kill-pod | + sleep 5
logger.go:42: 15:00:44 | operator-self-healing/5-kill-pod | + sleep 10
logger.go:42: 15:00:54 | operator-self-healing/5-kill-pod | + wait_deployment percona-server-mysql-operator kuttl-test-creative-silkworm
logger.go:42: 15:00:54 | operator-self-healing/5-kill-pod | + local name=percona-server-mysql-operator
logger.go:42: 15:00:54 | operator-self-healing/5-kill-pod | + local target_namespace=kuttl-test-creative-silkworm
logger.go:42: 15:00:54 | operator-self-healing/5-kill-pod | + sleep 10
logger.go:42: 15:01:04 | operator-self-healing/5-kill-pod | + set +o xtrace
logger.go:42: 15:01:06 | operator-self-healing/5-kill-pod | percona-server-mysql-operator
logger.go:42: 15:01:06 | operator-self-healing/5-kill-pod | ++ get_operator_pod
logger.go:42: 15:01:06 | operator-self-healing/5-kill-pod | ++ kubectl get pods -n kuttl-test-creative-silkworm --selector=app.kubernetes.io/name=percona-server-mysql-operator -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 15:01:07 | operator-self-healing/5-kill-pod | + '[' percona-server-mysql-operator-6b56d66f99-jlcgp == percona-server-mysql-operator-6b56d66f99-rvxgs ']'
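The pod-name comparison above confirms the deployment replaced the killed pod (jlcgp became rvxgs). The kubectl assert used next is not a stock kubectl subcommand; it appears to come from the kubectl-assert plugin. For environments without the plugin, a plain-kubectl equivalent of the same check:

    # Read readyReplicas directly and compare against the expected count.
    ready=$(kubectl -n kuttl-test-creative-silkworm get deployment \
        percona-server-mysql-operator -o 'jsonpath={.status.readyReplicas}')
    [ "$ready" = "1" ] && echo "operator deployment is ready"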
logger.go:42: 15:01:09 | operator-self-healing/5-kill-pod | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 15:01:09 | operator-self-healing/5-kill-pod | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 15:01:09 | operator-self-healing/5-kill-pod | INFO Found 1 resource(s).
logger.go:42: 15:01:09 | operator-self-healing/5-kill-pod | NAME                           NAMESPACE                     COL0
logger.go:42: 15:01:09 | operator-self-healing/5-kill-pod | percona-server-mysql-operator  kuttl-test-creative-silkworm  1
logger.go:42: 15:01:09 | operator-self-healing/5-kill-pod | ASSERT PASS
logger.go:42: 15:01:09 | operator-self-healing/5-kill-pod | test step completed 5-kill-pod
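wait_deployment switches xtrace off, so only its sleeps and final output are visible in the step above. A functionally similar wait, assuming a kubectl new enough (v1.23+) to support jsonpath conditions in kubectl wait:

    # Block until the operator deployment reports one ready replica, or time out.
    kubectl -n kuttl-test-creative-silkworm wait \
        deployment/percona-server-mysql-operator \
        --for=jsonpath='{.status.readyReplicas}'=1 --timeout=120s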
logger.go:42: 15:01:09 | operator-self-healing/6-scale-up | starting test step 6-scale-up
logger.go:42: 15:01:09 | operator-self-healing/6-scale-up | running command: [sh -c set -o errexit
    set -o xtrace
    source ../../functions
    get_cr \
        | yq eval '.spec.mysql.clusterType="async"' - \
        | yq eval '.spec.mysql.size=3' - \
        | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \
        | yq eval '.spec.proxy.haproxy.enabled=true' - \
        | yq eval '.spec.proxy.haproxy.size=5' - \
        | yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - \
        | yq eval '.spec.orchestrator.enabled=true' - \
        | yq eval '.spec.orchestrator.size=3' - \
        | yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \
        | kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 15:01:09 | operator-self-healing/6-scale-up | + source ../../functions
logger.go:42: 15:01:09 | operator-self-healing/6-scale-up | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/vars.sh
logger.go:42: 15:01:09 | operator-self-healing/6-scale-up | +++ oc get projects
logger.go:42: 15:01:15 | operator-self-healing/6-scale-up | error: the server doesn't have a resource type "projects"
logger.go:42: 15:01:15 | operator-self-healing/6-scale-up | +++ kubectl get nodes
logger.go:42: 15:01:15 | operator-self-healing/6-scale-up | +++ grep '^minikube'
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | ++ test_name=operator-self-healing
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + get_cr
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + local name_suffix=
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval .spec.proxy.haproxy.size=5 -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval '.spec.upgradeOptions.apply="disabled"' -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval .spec.proxy.haproxy.enabled=true -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + '[' -n '' ']'
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | ++ printf '.metadata.name="%s"' operator-self-healing
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval '.metadata.name="operator-self-healing"' /mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy/cr.yaml
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval '.spec.sslSecretName="test-ssl"' -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-424-70568ae
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-424-70568ae"' -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval .spec.orchestrator.enabled=true -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval .spec.orchestrator.size=3 -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + kubectl -n kuttl-test-creative-silkworm apply -f -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval '.spec.secretsName="test-secrets"' -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:dev-latest
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval '.spec.pmm.image="perconalab/pmm-client:dev-latest"' -
logger.go:42: 15:01:16 | operator-self-healing/6-scale-up | + yq eval .spec.mysql.size=3 -
logger.go:42: 15:01:17 | operator-self-healing/6-scale-up | perconaservermysql.ps.percona.com/operator-self-healing configured
logger.go:42: 15:01:40 | operator-self-healing/6-scale-up | test step completed 6-scale-up
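The scale-up is purely declarative: get_cr renders deploy/cr.yaml with run-specific names and images, the step layers its own yq edits on top, and kubectl apply leaves the reconciliation to the freshly restarted operator. Condensed to only the fields this step actually changes (paths as traced above):

    # Patch the stock CR and apply it; the operator reconciles the new sizes.
    yq eval '
        .metadata.name = "operator-self-healing" |
        .spec.mysql.clusterType = "async" |
        .spec.mysql.size = 3 |
        .spec.proxy.haproxy.enabled = true |
        .spec.proxy.haproxy.size = 5 |
        .spec.orchestrator.enabled = true |
        .spec.orchestrator.size = 3' \
        /mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy/cr.yaml \
        | kubectl -n kuttl-test-creative-silkworm apply -f -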
logger.go:42: 15:01:40 | operator-self-healing/7-network-loss | starting test step 7-network-loss
logger.go:42: 15:01:40 | operator-self-healing/7-network-loss | running command: [sh -c set -o errexit
    set -o xtrace
    source ../../functions
    network_loss "${OPERATOR_NS:-$NAMESPACE}" "$(get_operator_pod)" "operator"
    sleep 30 # wait for network loss to happen]
logger.go:42: 15:01:40 | operator-self-healing/7-network-loss | + source ../../functions
logger.go:42: 15:01:40 | operator-self-healing/7-network-loss | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/vars.sh
logger.go:42: 15:01:40 | operator-self-healing/7-network-loss | +++ oc get projects
logger.go:42: 15:01:46 | operator-self-healing/7-network-loss | error: the server doesn't have a resource type "projects"
logger.go:42: 15:01:46 | operator-self-healing/7-network-loss | +++ kubectl get nodes
logger.go:42: 15:01:46 | operator-self-healing/7-network-loss | +++ grep '^minikube'
logger.go:42: 15:01:47 | operator-self-healing/7-network-loss | ++ test_name=operator-self-healing
logger.go:42: 15:01:47 | operator-self-healing/7-network-loss | ++ get_operator_pod
logger.go:42: 15:01:47 | operator-self-healing/7-network-loss | ++ kubectl get pods -n kuttl-test-creative-silkworm --selector=app.kubernetes.io/name=percona-server-mysql-operator -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 15:01:48 | operator-self-healing/7-network-loss | + network_loss kuttl-test-creative-silkworm percona-server-mysql-operator-6b56d66f99-rvxgs operator
logger.go:42: 15:01:48 | operator-self-healing/7-network-loss | + local ns=kuttl-test-creative-silkworm
logger.go:42: 15:01:48 | operator-self-healing/7-network-loss | + local pod=percona-server-mysql-operator-6b56d66f99-rvxgs
logger.go:42: 15:01:48 | operator-self-healing/7-network-loss | + local chaos_suffix=operator
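network_loss follows the same template-patch-apply pattern as kill_pods, this time with e2e-tests/conf/chaos-network-loss.yml (patch shown below). Only the name and selector are visible in the trace; a plausible shape of the applied object, with the loss parameters marked as assumptions:

    # Hypothetical reconstruction of the NetworkChaos experiment. The action,
    # loss percentage and duration are assumed values typical for a
    # network-loss test, not taken from the log.
    kubectl apply --namespace kuttl-test-creative-silkworm -f - <<'EOF'
    apiVersion: chaos-mesh.org/v1alpha1
    kind: NetworkChaos
    metadata:
      name: chaos-pod-network-loss-operator
    spec:
      action: loss
      mode: one
      selector:
        pods:
          kuttl-test-creative-silkworm:
            - percona-server-mysql-operator-6b56d66f99-rvxgs
      loss:
        loss: "100"
      duration: 60s
    EOF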
logger.go:42: 15:01:48 | operator-self-healing/7-network-loss | + kubectl apply --namespace kuttl-test-creative-silkworm -f -
logger.go:42: 15:01:48 | operator-self-healing/7-network-loss | + yq eval '
logger.go:42: 15:01:48 | operator-self-healing/7-network-loss | .metadata.name = "chaos-pod-network-loss-operator" |
logger.go:42: 15:01:48 | operator-self-healing/7-network-loss | del(.spec.selector.pods.test-namespace) |
logger.go:42: 15:01:48 | operator-self-healing/7-network-loss | .spec.selector.pods.kuttl-test-creative-silkworm[0] = "percona-server-mysql-operator-6b56d66f99-rvxgs"' /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf/chaos-network-loss.yml
logger.go:42: 15:01:49 | operator-self-healing/7-network-loss | networkchaos.chaos-mesh.org/chaos-pod-network-loss-operator created
logger.go:42: 15:01:49 | operator-self-healing/7-network-loss | + sleep 5
logger.go:42: 15:01:54 | operator-self-healing/7-network-loss | + sleep 30
logger.go:42: 15:02:26 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 15:02:26 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 15:02:27 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
logger.go:42: 15:02:59 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 15:02:59 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 15:03:00 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
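Between the first failure at 15:02:27 and the success at 15:03:02 the harness simply re-ran the same assert every couple of seconds while the NetworkChaos kept the operator pod unreachable. The equivalent poll written out as a loop:

    # Poll until the deployment reports a ready replica again (~2 min budget).
    for _ in $(seq 1 60); do
        ready=$(kubectl -n kuttl-test-creative-silkworm get deployment \
            percona-server-mysql-operator -o 'jsonpath={.status.readyReplicas}')
        [ "$ready" = "1" ] && break
        sleep 2
    done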
logger.go:42: 15:03:01 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 15:03:01 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 15:03:02 | operator-self-healing/7-network-loss | INFO Found 1 resource(s).
logger.go:42: 15:03:02 | operator-self-healing/7-network-loss | NAME                           NAMESPACE                     COL0
logger.go:42: 15:03:02 | operator-self-healing/7-network-loss | percona-server-mysql-operator  kuttl-test-creative-silkworm  1
logger.go:42: 15:03:02 | operator-self-healing/7-network-loss | ASSERT PASS
logger.go:42: 15:03:02 | operator-self-healing/7-network-loss | test step completed 7-network-loss
logger.go:42: 15:03:02 | operator-self-healing/8-scale-down | starting test step 8-scale-down
logger.go:42: 15:03:02 | operator-self-healing/8-scale-down | running command: [sh -c set -o errexit
    set -o xtrace
    source ../../functions
    get_cr \
        | yq eval '.spec.mysql.clusterType="async"' - \
        | yq eval '.spec.mysql.size=3' - \
        | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \
        | yq eval '.spec.proxy.haproxy.enabled=true' - \
        | yq eval '.spec.proxy.haproxy.size=3' - \
        | yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - \
        | yq eval '.spec.orchestrator.enabled=true' - \
        | yq eval '.spec.orchestrator.size=3' - \
        | yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \
        | kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 15:03:02 | operator-self-healing/8-scale-down | + source ../../functions
logger.go:42: 15:03:02 | operator-self-healing/8-scale-down | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/vars.sh
logger.go:42: 15:03:02 | operator-self-healing/8-scale-down | +++ oc get projects
logger.go:42: 15:03:08 | operator-self-healing/8-scale-down | error: the server doesn't have a resource type "projects"
logger.go:42: 15:03:08 | operator-self-healing/8-scale-down | +++ grep '^minikube'
logger.go:42: 15:03:08 | operator-self-healing/8-scale-down | +++ kubectl get nodes
logger.go:42: 15:03:08 | operator-self-healing/8-scale-down | ++ test_name=operator-self-healing
logger.go:42: 15:03:08 | operator-self-healing/8-scale-down | + get_cr
logger.go:42: 15:03:08 | operator-self-healing/8-scale-down | + local name_suffix=
logger.go:42: 15:03:08 | operator-self-healing/8-scale-down | + yq eval '.metadata.name="operator-self-healing"' /mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy/cr.yaml
logger.go:42: 15:03:08 | operator-self-healing/8-scale-down | + yq eval .spec.proxy.haproxy.size=3 -
logger.go:42: 15:03:08 | operator-self-healing/8-scale-down | + yq eval .spec.mysql.size=3 -
logger.go:42: 15:03:08 | operator-self-healing/8-scale-down | + yq eval .spec.orchestrator.size=3 -
logger.go:42: 15:03:08 | operator-self-healing/8-scale-down | + kubectl -n kuttl-test-creative-silkworm apply -f -
logger.go:42: 15:03:10 | operator-self-healing/8-scale-down | perconaservermysql.ps.percona.com/operator-self-healing configured
logger.go:42: 15:03:30 | operator-self-healing/8-scale-down | test step completed 8-scale-down
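This step shrinks HAProxy from 5 back to 3 replicas with the same get_cr | yq | kubectl apply pipeline as step 6. A quick way to confirm the resize landed, assuming the HAProxy pods are managed by a StatefulSet following the <cluster>-haproxy naming used for the service:

    # Print desired vs. ready replicas for the HAProxy StatefulSet.
    # The StatefulSet name is an assumption based on the naming convention.
    kubectl -n kuttl-test-creative-silkworm get sts operator-self-healing-haproxy \
        -o 'jsonpath={.spec.replicas} {.status.readyReplicas}'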
logger.go:42: 15:03:30 | operator-self-healing/9-pod-failure | starting test step 9-pod-failure
logger.go:42: 15:03:30 | operator-self-healing/9-pod-failure | running command: [sh -c set -o errexit
    set -o xtrace
    source ../../functions
    failure_pod "${OPERATOR_NS:-$NAMESPACE}" "$(get_operator_pod)" "operator"
    sleep 30 # wait for pod failure to happen]
logger.go:42: 15:03:30 | operator-self-healing/9-pod-failure | + source ../../functions
logger.go:42: 15:03:30 | operator-self-healing/9-pod-failure | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/vars.sh
logger.go:42: 15:03:30 | operator-self-healing/9-pod-failure | +++ oc get projects
logger.go:42: 15:03:36 | operator-self-healing/9-pod-failure | error: the server doesn't have a resource type "projects"
logger.go:42: 15:03:36 | operator-self-healing/9-pod-failure | +++ grep '^minikube'
logger.go:42: 15:03:36 | operator-self-healing/9-pod-failure | +++ kubectl get nodes
logger.go:42: 15:03:37 | operator-self-healing/9-pod-failure | ++ test_name=operator-self-healing
logger.go:42: 15:03:37 | operator-self-healing/9-pod-failure | ++ get_operator_pod
logger.go:42: 15:03:37 | operator-self-healing/9-pod-failure | ++ kubectl get pods -n kuttl-test-creative-silkworm --selector=app.kubernetes.io/name=percona-server-mysql-operator -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 15:03:38 | operator-self-healing/9-pod-failure | + failure_pod kuttl-test-creative-silkworm percona-server-mysql-operator-6b56d66f99-rvxgs operator
logger.go:42: 15:03:38 | operator-self-healing/9-pod-failure | + local ns=kuttl-test-creative-silkworm
logger.go:42: 15:03:38 | operator-self-healing/9-pod-failure | + local pod=percona-server-mysql-operator-6b56d66f99-rvxgs
logger.go:42: 15:03:38 | operator-self-healing/9-pod-failure | + local chaos_suffix=operator
"chaos-pod-failure-operator" | logger.go:42: 15:03:38 | operator-self-healing/9-pod-failure | del(.spec.selector.pods.test-namespace) | logger.go:42: 15:03:38 | operator-self-healing/9-pod-failure | .spec.selector.pods.kuttl-test-creative-silkworm[0] = "percona-server-mysql-operator-6b56d66f99-rvxgs"' /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf/chaos-pod-failure.yml logger.go:42: 15:03:38 | operator-self-healing/9-pod-failure | + kubectl apply --namespace kuttl-test-creative-silkworm -f - logger.go:42: 15:03:39 | operator-self-healing/9-pod-failure | podchaos.chaos-mesh.org/chaos-pod-failure-operator created logger.go:42: 15:03:39 | operator-self-healing/9-pod-failure | + sleep 5 logger.go:42: 15:03:44 | operator-self-healing/9-pod-failure | + sleep 30 logger.go:42: 15:04:16 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:04:16 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:04:16 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:04:18 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:04:18 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:04:19 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:04:20 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:04:20 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:04:21 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:04:22 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:04:22 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:04:23 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:04:24 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:04:24 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:04:25 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. 
logger.go:42: 15:04:26 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:04:26 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:04:27 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:04:28 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:04:28 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:04:29 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:04:30 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:04:30 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:04:31 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:04:32 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:04:32 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:04:33 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:04:34 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:04:34 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:04:35 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:04:36 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:04:36 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:04:37 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:04:38 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:04:38 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:04:39 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. 
logger.go:42: 15:04:40 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:04:40 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:04:41 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:04:42 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:04:42 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:04:43 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:04:44 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:04:44 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:04:45 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:04:46 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:04:47 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:04:47 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:04:49 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:04:49 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:04:49 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 15:04:51 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 15:04:51 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 15:04:51 | operator-self-healing/9-pod-failure | INFO Found 1 resource(s). 
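
The object applied by failure_pod at 15:03:38 above is e2e-tests/conf/chaos-pod-failure.yml with its name and pod selector rewritten by the yq expression in the trace. The template itself never appears in the log, so the action, mode and duration below are assumptions; a plausible reconstruction of the Chaos Mesh manifest that reached the API server:

    # Sketch of the applied PodChaos; name and selector are taken from the
    # trace, action/mode/duration are assumed (the base template is not shown).
    kubectl apply --namespace kuttl-test-creative-silkworm -f - <<'EOF'
    apiVersion: chaos-mesh.org/v1alpha1
    kind: PodChaos
    metadata:
      name: chaos-pod-failure-operator
    spec:
      action: pod-failure   # assumed from the template file name
      mode: one             # assumption
      duration: "60s"       # assumption; the step sleeps 5s + 30s before asserting
      selector:
        pods:
          kuttl-test-creative-silkworm:
            - percona-server-mysql-operator-6b56d66f99-rvxgs
    EOF

While a pod-failure chaos is active, Chaos Mesh swaps the target pod's containers for a pause image, which is why the readiness asserts above keep failing until the injected failure expires.
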
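
The kubectl assert calls above come from the kubectl-assert plugin; the step simply re-runs the assertion until the operator Deployment reports a ready replica again. Without the plugin, the same wait can be approximated with plain kubectl. A minimal sketch (the helper name is hypothetical; this is not the suite's own retry logic from e2e-tests/functions):

    # Poll status.readyReplicas on the operator Deployment until it is 1,
    # i.e. until the pod has recovered from the injected failure.
    wait_operator_ready() {
        local ns="${OPERATOR_NS:-$NAMESPACE}" tries=0
        until [ "$(kubectl -n "$ns" get deployment percona-server-mysql-operator \
            -o 'jsonpath={.status.readyReplicas}')" = "1" ]; do
            tries=$((tries + 1))
            if [ "$tries" -ge 60 ]; then
                echo "operator did not become ready in time" >&2
                return 1
            fi
            sleep 2
        done
    }
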
logger.go:42: 15:04:51 | operator-self-healing/9-pod-failure | NAME NAMESPACE COL0 logger.go:42: 15:04:51 | operator-self-healing/9-pod-failure | percona-server-mysql-operator kuttl-test-creative-silkworm 1 logger.go:42: 15:04:51 | operator-self-healing/9-pod-failure | ASSERT PASS logger.go:42: 15:04:51 | operator-self-healing/9-pod-failure | test step completed 9-pod-failure logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | starting test step 10-scale-up logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | running command: [sh -c set -o errexit set -o xtrace source ../../functions get_cr \ | yq eval '.spec.mysql.clusterType="async"' - \ | yq eval '.spec.mysql.size=3' - \ | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \ | yq eval '.spec.proxy.haproxy.enabled=true' - \ | yq eval '.spec.proxy.haproxy.size=5' - \ | yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - \ | yq eval '.spec.orchestrator.enabled=true' - \ | yq eval '.spec.orchestrator.size=3' - \ | yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \ | kubectl -n "${NAMESPACE}" apply -f -] logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | + source ../../functions logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ realpath ../../.. logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424 logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/vars.sh logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424 logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424 logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | ++++ mktemp -d logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ export TEMP_DIR=/tmp/tmp.LL6t3xqL52 logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ TEMP_DIR=/tmp/tmp.LL6t3xqL52 logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ export GIT_BRANCH=PR-424 logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ GIT_BRANCH=PR-424 logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ export VERSION=PR-424-70568ae logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ VERSION=PR-424-70568ae logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-424-70568ae logger.go:42: 15:04:51 | 
operator-self-healing/10-scale-up | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-424-70568ae logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ export IMAGE_PMM=perconalab/pmm-client:dev-latest logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ IMAGE_PMM=perconalab/pmm-client:dev-latest logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ export IMAGE_PMM_SERVER_REPO=perconalab/pmm-server logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ IMAGE_PMM_SERVER_REPO=perconalab/pmm-server logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ export IMAGE_PMM_SERVER_TAG=dev-latest logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ IMAGE_PMM_SERVER_TAG=dev-latest logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | ++++ which gdate logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-424/bin/:/home/ec2-user/google-cloud-sdk/bin:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | ++++ which date logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ date=/usr/bin/date logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ command -v oc logger.go:42: 15:04:51 | operator-self-healing/10-scale-up | +++ oc get projects logger.go:42: 15:04:57 | operator-self-healing/10-scale-up | error: the server doesn't have a resource type "projects" logger.go:42: 15:04:57 | operator-self-healing/10-scale-up | +++ kubectl get nodes logger.go:42: 15:04:57 | operator-self-healing/10-scale-up | +++ grep '^minikube' logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | ++++ 
pwd logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/tests/operator-self-healing logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | ++ test_name=operator-self-healing logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval .spec.mysql.size=3 - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + get_cr logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + local name_suffix= logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval '.spec.sslSecretName="test-ssl"' - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval .spec.proxy.haproxy.size=5 - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval .spec.proxy.haproxy.enabled=true - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval '.spec.upgradeOptions.apply="disabled"' - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + '[' -n '' ']' logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval .spec.orchestrator.size=3 - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval '.spec.secretsName="test-secrets"' - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | ++ printf '.metadata.name="%s"' operator-self-healing logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval '.metadata.name="operator-self-healing"' /mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy/cr.yaml logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | ++ printf '.spec.toolkit.image="%s"' 
perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:dev-latest logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval '.spec.pmm.image="perconalab/pmm-client:dev-latest"' - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-424-70568ae logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-424-70568ae"' - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + yq eval .spec.orchestrator.enabled=true - logger.go:42: 15:04:58 | operator-self-healing/10-scale-up | + kubectl -n kuttl-test-creative-silkworm apply -f - logger.go:42: 15:04:59 | operator-self-healing/10-scale-up | perconaservermysql.ps.percona.com/operator-self-healing configured logger.go:42: 15:05:23 | operator-self-healing/10-scale-up | test step completed 10-scale-up logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | starting test step 11-destroy-chaos-mesh logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | running command: [sh -c set -o errexit set -o xtrace source ../../functions destroy_chaos_mesh] logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | + source ../../functions logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ realpath ../../.. 
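
Stepping back to step 10 for a moment: the pipeline of single-field yq edits traced above is how the suite customizes deploy/cr.yaml in memory before applying it. Collapsed into one expression, the scale-up amounts to the following (a condensed illustration only; it skips the image pinning and renaming that get_cr also performs):

    # One-shot equivalent of the step-10 edits: 3 MySQL pods, 5 HAProxy pods
    # and 3 orchestrator pods, with anti-affinity disabled so the replicas
    # can co-schedule on a small test node pool.
    yq eval '
      .spec.mysql.clusterType = "async" |
      .spec.mysql.size = 3 |
      .spec.mysql.affinity.antiAffinityTopologyKey = "none" |
      .spec.proxy.haproxy.enabled = true |
      .spec.proxy.haproxy.size = 5 |
      .spec.proxy.haproxy.affinity.antiAffinityTopologyKey = "none" |
      .spec.orchestrator.enabled = true |
      .spec.orchestrator.size = 3 |
      .spec.orchestrator.affinity.antiAffinityTopologyKey = "none"
    ' deploy/cr.yaml | kubectl -n "${NAMESPACE}" apply -f -
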
logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424 logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/vars.sh logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424 logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-424 logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/deploy logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/conf logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | ++++ mktemp -d logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ export TEMP_DIR=/tmp/tmp.PwOio3dzQM logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ TEMP_DIR=/tmp/tmp.PwOio3dzQM logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ export GIT_BRANCH=PR-424 logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ GIT_BRANCH=PR-424 logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ export VERSION=PR-424-70568ae logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ VERSION=PR-424-70568ae logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-424-70568ae logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-424-70568ae logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ export 
IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_PMM=perconalab/pmm-client:dev-latest logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_PMM=perconalab/pmm-client:dev-latest logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_PMM_SERVER_REPO=perconalab/pmm-server logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_PMM_SERVER_REPO=perconalab/pmm-server logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_PMM_SERVER_TAG=dev-latest logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_PMM_SERVER_TAG=dev-latest logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | ++++ which gdate logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-424/bin/:/home/ec2-user/google-cloud-sdk/bin:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | ++++ which date logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ date=/usr/bin/date logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ command -v oc logger.go:42: 15:05:23 | operator-self-healing/11-destroy-chaos-mesh | +++ oc get projects logger.go:42: 15:05:29 | operator-self-healing/11-destroy-chaos-mesh | error: the server doesn't have a resource type "projects" logger.go:42: 15:05:29 | operator-self-healing/11-destroy-chaos-mesh | +++ kubectl get nodes logger.go:42: 15:05:29 | operator-self-healing/11-destroy-chaos-mesh | +++ grep '^minikube' logger.go:42: 15:05:30 | operator-self-healing/11-destroy-chaos-mesh | ++++ pwd logger.go:42: 15:05:30 | operator-self-healing/11-destroy-chaos-mesh | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-424/e2e-tests/tests/operator-self-healing logger.go:42: 15:05:30 | operator-self-healing/11-destroy-chaos-mesh | ++ test_name=operator-self-healing logger.go:42: 15:05:30 | operator-self-healing/11-destroy-chaos-mesh | + destroy_chaos_mesh logger.go:42: 15:05:30 | operator-self-healing/11-destroy-chaos-mesh | ++ helm list --all-namespaces --filter chaos-mesh logger.go:42: 15:05:30 | operator-self-healing/11-destroy-chaos-mesh | ++ tail -n1 logger.go:42: 15:05:30 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '-F ' '{print $2}' logger.go:42: 15:05:30 | operator-self-healing/11-destroy-chaos-mesh | ++ sed s/NAMESPACE// 
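
destroy_chaos_mesh, whose trace follows, locates the chaos-mesh Helm release, deletes every object of every chaos-mesh.org resource type, and only then uninstalls the release, so that deletion is not left blocked by finalizers once the controller is gone. Reduced to its skeleton, with the suite's extra guards omitted:

    # Skeleton of the cleanup traced below: sweep all chaos objects of every
    # chaos-mesh.org kind, then remove the Helm release itself.
    chaos_ns=$(helm list --all-namespaces --filter chaos-mesh \
        | tail -n1 | awk '{print $2}' | sed 's/NAMESPACE//')
    for kind in $(kubectl api-resources | grep chaos-mesh | awk '{print $1}'); do
        timeout 30 kubectl delete "$kind" --all --all-namespaces || :
    done
    [ -n "$chaos_ns" ] && helm uninstall chaos-mesh --namespace "$chaos_ns"
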
logger.go:42: 15:05:30 | operator-self-healing/11-destroy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-424/kubeconfig
logger.go:42: 15:05:30 | operator-self-healing/11-destroy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-424/kubeconfig
logger.go:42: 15:05:31 | operator-self-healing/11-destroy-chaos-mesh | + local chaos_mesh_ns=kuttl-test-creative-silkworm
logger.go:42: 15:05:31 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl api-resources
logger.go:42: 15:05:31 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh
logger.go:42: 15:05:31 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 15:05:32 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:32 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete awschaos --all --all-namespaces
logger.go:42: 15:05:33 | operator-self-healing/11-destroy-chaos-mesh | No resources found
logger.go:42: 15:05:33 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:33 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete azurechaos --all --all-namespaces
logger.go:42: 15:05:34 | operator-self-healing/11-destroy-chaos-mesh | No resources found
logger.go:42: 15:05:34 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:34 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete blockchaos --all --all-namespaces
logger.go:42: 15:05:34 | operator-self-healing/11-destroy-chaos-mesh | No resources found
logger.go:42: 15:05:34 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:34 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete dnschaos --all --all-namespaces
logger.go:42: 15:05:35 | operator-self-healing/11-destroy-chaos-mesh | No resources found
logger.go:42: 15:05:35 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:35 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete gcpchaos --all --all-namespaces
logger.go:42: 15:05:36 | operator-self-healing/11-destroy-chaos-mesh | No resources found
logger.go:42: 15:05:36 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:36 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete httpchaos --all --all-namespaces
logger.go:42: 15:05:37 | operator-self-healing/11-destroy-chaos-mesh | No resources found
logger.go:42: 15:05:37 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:37 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete iochaos --all --all-namespaces
logger.go:42: 15:05:37 | operator-self-healing/11-destroy-chaos-mesh | No resources found
logger.go:42: 15:05:37 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:37 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete jvmchaos --all --all-namespaces
logger.go:42: 15:05:38 | operator-self-healing/11-destroy-chaos-mesh | No resources found
logger.go:42: 15:05:38 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:38 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete kernelchaos --all --all-namespaces
logger.go:42: 15:05:39 | operator-self-healing/11-destroy-chaos-mesh | No resources found
logger.go:42: 15:05:39 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:39 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete networkchaos --all --all-namespaces
logger.go:42: 15:05:40 | operator-self-healing/11-destroy-chaos-mesh | networkchaos.chaos-mesh.org "chaos-pod-network-loss-operator" deleted
logger.go:42: 15:05:40 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:40 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete physicalmachinechaos --all --all-namespaces
logger.go:42: 15:05:40 | operator-self-healing/11-destroy-chaos-mesh | No resources found
logger.go:42: 15:05:40 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:40 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete physicalmachines --all --all-namespaces
logger.go:42: 15:05:41 | operator-self-healing/11-destroy-chaos-mesh | No resources found
logger.go:42: 15:05:41 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:41 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete podchaos --all --all-namespaces
logger.go:42: 15:05:42 | operator-self-healing/11-destroy-chaos-mesh | podchaos.chaos-mesh.org "chaos-pod-failure-operator" deleted
logger.go:42: 15:05:42 | operator-self-healing/11-destroy-chaos-mesh | podchaos.chaos-mesh.org "chaos-pod-kill-operator" deleted
logger.go:42: 15:05:42 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:42 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete podhttpchaos --all --all-namespaces
logger.go:42: 15:05:43 | operator-self-healing/11-destroy-chaos-mesh | No resources found
logger.go:42: 15:05:43 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:43 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete podiochaos --all --all-namespaces
logger.go:42: 15:05:44 | operator-self-healing/11-destroy-chaos-mesh | No resources found
logger.go:42: 15:05:44 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:44 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete podnetworkchaos --all --all-namespaces
logger.go:42: 15:05:45 | operator-self-healing/11-destroy-chaos-mesh | podnetworkchaos.chaos-mesh.org "percona-server-mysql-operator-6b56d66f99-rvxgs" deleted
logger.go:42: 15:05:45 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:45 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete remoteclusters --all --all-namespaces
logger.go:42: 15:05:46 | operator-self-healing/11-destroy-chaos-mesh | No resources found
logger.go:42: 15:05:46 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:46 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete schedules --all --all-namespaces
logger.go:42: 15:05:46 | operator-self-healing/11-destroy-chaos-mesh | No resources found
logger.go:42: 15:05:46 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:46 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete statuschecks --all --all-namespaces
logger.go:42: 15:05:47 | operator-self-healing/11-destroy-chaos-mesh | No resources found
logger.go:42: 15:05:47 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:47 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete stresschaos --all --all-namespaces
logger.go:42: 15:05:48 | operator-self-healing/11-destroy-chaos-mesh | No resources found
logger.go:42: 15:05:48 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:48 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete timechaos --all --all-namespaces
logger.go:42: 15:05:48 | operator-self-healing/11-destroy-chaos-mesh | No resources found
logger.go:42: 15:05:48 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:48 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete workflownodes --all --all-namespaces
logger.go:42: 15:05:49 | operator-self-healing/11-destroy-chaos-mesh | No resources found
logger.go:42: 15:05:49 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')'
logger.go:42: 15:05:49 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete workflows --all --all-namespaces
logger.go:42: 15:05:50 | operator-self-healing/11-destroy-chaos-mesh | No resources found
logger.go:42: 15:05:50 | operator-self-healing/11-destroy-chaos-mesh | + '[' -n kuttl-test-creative-silkworm ']'
logger.go:42: 15:05:50 | operator-self-healing/11-destroy-chaos-mesh | + helm uninstall chaos-mesh --namespace kuttl-test-creative-silkworm
logger.go:42: 15:05:50 | operator-self-healing/11-destroy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-424/kubeconfig
logger.go:42: 15:05:50 | operator-self-healing/11-destroy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure.
Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-424/kubeconfig logger.go:42: 15:05:55 | operator-self-healing/11-destroy-chaos-mesh | release "chaos-mesh" uninstalled logger.go:42: 15:05:55 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get crd logger.go:42: 15:05:55 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh.org logger.go:42: 15:05:55 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:05:56 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete crd awschaos.chaos-mesh.org azurechaos.chaos-mesh.org blockchaos.chaos-mesh.org dnschaos.chaos-mesh.org gcpchaos.chaos-mesh.org httpchaos.chaos-mesh.org iochaos.chaos-mesh.org jvmchaos.chaos-mesh.org kernelchaos.chaos-mesh.org networkchaos.chaos-mesh.org physicalmachinechaos.chaos-mesh.org physicalmachines.chaos-mesh.org podchaos.chaos-mesh.org podhttpchaos.chaos-mesh.org podiochaos.chaos-mesh.org podnetworkchaos.chaos-mesh.org remoteclusters.chaos-mesh.org schedules.chaos-mesh.org statuschecks.chaos-mesh.org stresschaos.chaos-mesh.org timechaos.chaos-mesh.org workflownodes.chaos-mesh.org workflows.chaos-mesh.org logger.go:42: 15:05:57 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "awschaos.chaos-mesh.org" deleted logger.go:42: 15:05:57 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "azurechaos.chaos-mesh.org" deleted logger.go:42: 15:05:57 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "blockchaos.chaos-mesh.org" deleted logger.go:42: 15:05:57 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "dnschaos.chaos-mesh.org" deleted logger.go:42: 15:05:57 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "gcpchaos.chaos-mesh.org" deleted logger.go:42: 15:05:57 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "httpchaos.chaos-mesh.org" deleted logger.go:42: 15:05:57 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "iochaos.chaos-mesh.org" deleted logger.go:42: 15:05:58 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "jvmchaos.chaos-mesh.org" deleted logger.go:42: 15:05:58 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "kernelchaos.chaos-mesh.org" deleted logger.go:42: 15:05:58 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "networkchaos.chaos-mesh.org" deleted logger.go:42: 15:05:58 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "physicalmachinechaos.chaos-mesh.org" deleted logger.go:42: 15:05:59 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "physicalmachines.chaos-mesh.org" deleted logger.go:42: 15:05:59 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podchaos.chaos-mesh.org" deleted logger.go:42: 15:05:59 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podhttpchaos.chaos-mesh.org" deleted logger.go:42: 15:05:59 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podiochaos.chaos-mesh.org" deleted logger.go:42: 15:06:00 | operator-self-healing/11-destroy-chaos-mesh | 
customresourcedefinition.apiextensions.k8s.io "podnetworkchaos.chaos-mesh.org" deleted logger.go:42: 15:06:00 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "remoteclusters.chaos-mesh.org" deleted logger.go:42: 15:06:00 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "schedules.chaos-mesh.org" deleted logger.go:42: 15:06:01 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "statuschecks.chaos-mesh.org" deleted logger.go:42: 15:06:01 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "stresschaos.chaos-mesh.org" deleted logger.go:42: 15:06:01 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "timechaos.chaos-mesh.org" deleted logger.go:42: 15:06:03 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "workflownodes.chaos-mesh.org" deleted logger.go:42: 15:06:04 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "workflows.chaos-mesh.org" deleted logger.go:42: 15:06:10 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get clusterrolebinding logger.go:42: 15:06:10 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 15:06:10 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:06:11 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete clusterrolebinding logger.go:42: 15:06:11 | operator-self-healing/11-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 15:06:11 | operator-self-healing/11-destroy-chaos-mesh | + : logger.go:42: 15:06:11 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get clusterrole logger.go:42: 15:06:11 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 15:06:11 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:06:12 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete clusterrole logger.go:42: 15:06:13 | operator-self-healing/11-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 15:06:13 | operator-self-healing/11-destroy-chaos-mesh | + : logger.go:42: 15:06:13 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get MutatingWebhookConfiguration logger.go:42: 15:06:13 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 15:06:13 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:06:14 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete MutatingWebhookConfiguration logger.go:42: 15:06:14 | operator-self-healing/11-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 15:06:14 | operator-self-healing/11-destroy-chaos-mesh | + : logger.go:42: 15:06:14 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration logger.go:42: 15:06:14 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 15:06:14 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 15:06:15 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration logger.go:42: 15:06:15 | operator-self-healing/11-destroy-chaos-mesh | error: resource(s) were provided, but no name was 
specified
logger.go:42: 15:06:15 | operator-self-healing/11-destroy-chaos-mesh | + :
logger.go:42: 15:06:15 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration
logger.go:42: 15:06:15 | operator-self-healing/11-destroy-chaos-mesh | ++ grep validate-auth
logger.go:42: 15:06:15 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 15:06:16 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration
logger.go:42: 15:06:16 | operator-self-healing/11-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified
logger.go:42: 15:06:16 | operator-self-healing/11-destroy-chaos-mesh | + :
logger.go:42: 15:06:18 | operator-self-healing/11-destroy-chaos-mesh | test step completed 11-destroy-chaos-mesh
logger.go:42: 15:06:18 | operator-self-healing/12-drop-finalizer | starting test step 12-drop-finalizer
logger.go:42: 15:06:19 | operator-self-healing/12-drop-finalizer | PerconaServerMySQL:kuttl-test-creative-silkworm/operator-self-healing updated
logger.go:42: 15:06:19 | operator-self-healing/12-drop-finalizer | test step completed 12-drop-finalizer
logger.go:42: 15:06:19 | operator-self-healing | operator-self-healing events from ns kuttl-test-creative-silkworm:
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:55:25 +0000 UTC Normal Pod percona-server-mysql-operator-6b56d66f99-jlcgp Scheduled Successfully assigned kuttl-test-creative-silkworm/percona-server-mysql-operator-6b56d66f99-jlcgp to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-dd2p
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:55:25 +0000 UTC Normal ReplicaSet.apps percona-server-mysql-operator-6b56d66f99 SuccessfulCreate Created pod: percona-server-mysql-operator-6b56d66f99-jlcgp
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:55:25 +0000 UTC Normal Deployment.apps percona-server-mysql-operator ScalingReplicaSet Scaled up replica set percona-server-mysql-operator-6b56d66f99 to 1
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:55:27 +0000 UTC Normal Pod percona-server-mysql-operator-6b56d66f99-jlcgp.spec.containers{manager} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:55:28 +0000 UTC Normal Lease.coordination.k8s.io 08db2feb.percona.com LeaderElection percona-server-mysql-operator-6b56d66f99-jlcgp_e4c562d2-4a5b-4de7-908f-55fb9144eea9 became leader
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:55:28 +0000 UTC Normal Pod mysql-client Scheduled Successfully assigned kuttl-test-creative-silkworm/mysql-client to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-sbvs
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:55:28 +0000 UTC Normal Pod percona-server-mysql-operator-6b56d66f99-jlcgp.spec.containers{manager} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 512.467478ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:55:28 +0000 UTC Normal Pod percona-server-mysql-operator-6b56d66f99-jlcgp.spec.containers{manager} Created Created container manager
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:55:28 +0000 UTC Normal Pod percona-server-mysql-operator-6b56d66f99-jlcgp.spec.containers{manager} Started Started container manager
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:55:29 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Container image "percona/percona-server:8.0.25" already present on machine
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:55:29 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container mysql-client
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:55:29 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:20 +0000 UTC Normal Pod chaos-controller-manager-7895996f4-jzzzf Scheduled Successfully assigned kuttl-test-creative-silkworm/chaos-controller-manager-7895996f4-jzzzf to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-zvbd
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:20 +0000 UTC Normal Pod chaos-controller-manager-7895996f4-s547j Scheduled Successfully assigned kuttl-test-creative-silkworm/chaos-controller-manager-7895996f4-s547j to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-dd2p
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:20 +0000 UTC Normal Pod chaos-controller-manager-7895996f4-wv9zn Scheduled Successfully assigned kuttl-test-creative-silkworm/chaos-controller-manager-7895996f4-wv9zn to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-sbvs
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:20 +0000 UTC Normal ReplicaSet.apps chaos-controller-manager-7895996f4 SuccessfulCreate Created pod: chaos-controller-manager-7895996f4-jzzzf
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:20 +0000 UTC Normal ReplicaSet.apps chaos-controller-manager-7895996f4 SuccessfulCreate Created pod: chaos-controller-manager-7895996f4-s547j
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:20 +0000 UTC Normal ReplicaSet.apps chaos-controller-manager-7895996f4 SuccessfulCreate Created pod: chaos-controller-manager-7895996f4-wv9zn
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:20 +0000 UTC Normal Deployment.apps chaos-controller-manager ScalingReplicaSet Scaled up replica set chaos-controller-manager-7895996f4 to 3
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:20 +0000 UTC Normal Pod chaos-daemon-h8vkr Scheduled Successfully assigned kuttl-test-creative-silkworm/chaos-daemon-h8vkr to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-sbvs
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:20 +0000 UTC Normal Pod chaos-daemon-mhk49 Scheduled Successfully assigned kuttl-test-creative-silkworm/chaos-daemon-mhk49 to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-dd2p
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:20 +0000 UTC Normal Pod chaos-daemon-pxbxw Scheduled Successfully assigned kuttl-test-creative-silkworm/chaos-daemon-pxbxw to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-zvbd
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:20 +0000 UTC Normal DaemonSet.apps chaos-daemon SuccessfulCreate Created pod: chaos-daemon-h8vkr
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:20 +0000 UTC Normal DaemonSet.apps chaos-daemon SuccessfulCreate Created pod: chaos-daemon-pxbxw
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:20 +0000 UTC Normal DaemonSet.apps chaos-daemon SuccessfulCreate Created pod: chaos-daemon-mhk49
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:21 +0000 UTC Normal Pod chaos-controller-manager-7895996f4-jzzzf.spec.containers{chaos-mesh} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:21 +0000 UTC Normal Pod chaos-controller-manager-7895996f4-s547j.spec.containers{chaos-mesh} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:21 +0000 UTC Normal Pod chaos-controller-manager-7895996f4-wv9zn.spec.containers{chaos-mesh} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:21 +0000 UTC Normal Pod chaos-daemon-h8vkr.spec.containers{chaos-daemon} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:21 +0000 UTC Normal Pod chaos-daemon-mhk49.spec.containers{chaos-daemon} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:21 +0000 UTC Normal Pod chaos-daemon-pxbxw.spec.containers{chaos-daemon} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:31 +0000 UTC Normal Pod chaos-daemon-h8vkr.spec.containers{chaos-daemon} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" in 10.303277136s
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:31 +0000 UTC Normal Pod chaos-daemon-h8vkr.spec.containers{chaos-daemon} Created Created container chaos-daemon
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:31 +0000 UTC Normal Pod chaos-daemon-h8vkr.spec.containers{chaos-daemon} Started Started container chaos-daemon
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:31 +0000 UTC Normal Pod chaos-daemon-mhk49.spec.containers{chaos-daemon} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" in 10.128850623s
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:31 +0000 UTC Normal Pod chaos-daemon-mhk49.spec.containers{chaos-daemon} Created Created container chaos-daemon
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:31 +0000 UTC Normal Pod chaos-daemon-mhk49.spec.containers{chaos-daemon} Started Started container chaos-daemon
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:31 +0000 UTC Normal Pod chaos-daemon-pxbxw.spec.containers{chaos-daemon} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" in 10.269440132s
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:31 +0000 UTC Normal Pod chaos-daemon-pxbxw.spec.containers{chaos-daemon} Created Created container chaos-daemon
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:31 +0000 UTC Normal Pod chaos-daemon-pxbxw.spec.containers{chaos-daemon} Started Started container chaos-daemon
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:33 +0000 UTC Normal Pod chaos-controller-manager-7895996f4-jzzzf.spec.containers{chaos-mesh} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" in 11.816286069s
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:33 +0000 UTC Normal Pod chaos-controller-manager-7895996f4-jzzzf.spec.containers{chaos-mesh} Created Created container chaos-mesh
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:33 +0000 UTC Normal Pod chaos-controller-manager-7895996f4-jzzzf.spec.containers{chaos-mesh} Started Started container chaos-mesh
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:33 +0000 UTC Normal Pod chaos-controller-manager-7895996f4-s547j.spec.containers{chaos-mesh} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" in 11.769107662s
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:33 +0000 UTC Normal Pod chaos-controller-manager-7895996f4-s547j.spec.containers{chaos-mesh} Created Created container chaos-mesh
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:33 +0000 UTC Normal Pod chaos-controller-manager-7895996f4-s547j.spec.containers{chaos-mesh} Started Started container chaos-mesh
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:33 +0000 UTC Normal Pod chaos-controller-manager-7895996f4-wv9zn.spec.containers{chaos-mesh} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" in 11.895205744s
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:33 +0000 UTC Normal Pod chaos-controller-manager-7895996f4-wv9zn.spec.containers{chaos-mesh} Created Created container chaos-mesh
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:33 +0000 UTC Normal Pod chaos-controller-manager-7895996f4-wv9zn.spec.containers{chaos-mesh} Started Started container chaos-mesh
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:33 +0000 UTC Normal ConfigMap chaos-mesh LeaderElection chaos-controller-manager-7895996f4-s547j_622f6452-c3be-4f44-bda2-dc520ab18d61 became leader
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:33 +0000 UTC Normal Lease.coordination.k8s.io chaos-mesh LeaderElection chaos-controller-manager-7895996f4-s547j_622f6452-c3be-4f44-bda2-dc520ab18d61 became leader
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:47 +0000 UTC Normal CertificateRequest.cert-manager.io operator-self-healing-ca-cert-qznn9 WaitingForApproval Not signing CertificateRequest until it is Approved
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:47 +0000 UTC Normal CertificateRequest.cert-manager.io operator-self-healing-ca-cert-qznn9 WaitingForApproval Not signing CertificateRequest until it is Approved
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:47 +0000 UTC Normal CertificateRequest.cert-manager.io operator-self-healing-ca-cert-qznn9 WaitingForApproval Not signing CertificateRequest until it is Approved
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:47 +0000 UTC Normal CertificateRequest.cert-manager.io operator-self-healing-ca-cert-qznn9 WaitingForApproval Not signing CertificateRequest until it is Approved
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:47 +0000 UTC Normal CertificateRequest.cert-manager.io operator-self-healing-ca-cert-qznn9 WaitingForApproval Not signing CertificateRequest until it is Approved
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:47 +0000 UTC Normal CertificateRequest.cert-manager.io operator-self-healing-ca-cert-qznn9 cert-manager.io Certificate request has been approved by cert-manager.io
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:47 +0000 UTC Normal CertificateRequest.cert-manager.io operator-self-healing-ca-cert-qznn9 CertificateIssued Certificate fetched from issuer successfully
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:47 +0000 UTC Normal Certificate.cert-manager.io operator-self-healing-ca-cert Issuing Issuing certificate as Secret does not exist
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:47 +0000 UTC Normal Certificate.cert-manager.io operator-self-healing-ca-cert Generated Stored new private key in temporary Secret resource "operator-self-healing-ca-cert-xkrd9"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:47 +0000 UTC Normal Certificate.cert-manager.io operator-self-healing-ca-cert Requested Created new CertificateRequest resource "operator-self-healing-ca-cert-qznn9"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:47 +0000 UTC Normal Certificate.cert-manager.io operator-self-healing-ca-cert Issuing The certificate has been successfully issued
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:50 +0000 UTC Normal Issuer.cert-manager.io operator-self-healing-pso-issuer KeyPairVerified Signing CA verified
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:50 +0000 UTC Normal CertificateRequest.cert-manager.io operator-self-healing-ssl-fqngn WaitingForApproval Not signing CertificateRequest until it is Approved
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:50 +0000 UTC Normal CertificateRequest.cert-manager.io operator-self-healing-ssl-fqngn WaitingForApproval Not signing CertificateRequest until it is Approved
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:50 +0000 UTC Normal CertificateRequest.cert-manager.io operator-self-healing-ssl-fqngn WaitingForApproval Not signing CertificateRequest until it is Approved
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:50 +0000 UTC Normal CertificateRequest.cert-manager.io operator-self-healing-ssl-fqngn WaitingForApproval Not signing CertificateRequest until it is Approved
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:50 +0000 UTC Normal CertificateRequest.cert-manager.io operator-self-healing-ssl-fqngn WaitingForApproval Not signing CertificateRequest until it is Approved
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:50 +0000 UTC Normal CertificateRequest.cert-manager.io operator-self-healing-ssl-fqngn cert-manager.io Certificate request has been approved by cert-manager.io
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:50 +0000 UTC Normal CertificateRequest.cert-manager.io operator-self-healing-ssl-fqngn CertificateIssued Certificate fetched from issuer successfully
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:50 +0000 UTC Normal Certificate.cert-manager.io operator-self-healing-ssl Issuing Issuing certificate as Secret was previously issued by Issuer.cert-manager.io/
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:50 +0000 UTC Normal Certificate.cert-manager.io operator-self-healing-ssl Reused Reusing private key stored in existing Secret resource "test-ssl"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:50 +0000 UTC Normal Certificate.cert-manager.io operator-self-healing-ssl Requested Created new CertificateRequest resource "operator-self-healing-ssl-fqngn"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:50 +0000 UTC Normal Certificate.cert-manager.io operator-self-healing-ssl Issuing The certificate has been successfully issued
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:54 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:54 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-0 ExternalProvisioning waiting for a volume to be created, either by external provisioner "pd.csi.storage.gke.io" or manually created by system administrator
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:54 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-creative-silkworm/datadir-operator-self-healing-mysql-0"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:54 +0000 UTC Normal StatefulSet.apps operator-self-healing-mysql SuccessfulCreate create Claim datadir-operator-self-healing-mysql-0 Pod operator-self-healing-mysql-0 in StatefulSet operator-self-healing-mysql success
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:54 +0000 UTC Normal StatefulSet.apps operator-self-healing-mysql SuccessfulCreate create Pod operator-self-healing-mysql-0 in StatefulSet operator-self-healing-mysql successful
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:58 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-58307f52-a00d-4d01-b36a-ce5118d6da81
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:56:59 +0000 UTC Normal Pod operator-self-healing-mysql-0 Scheduled Successfully assigned kuttl-test-creative-silkworm/operator-self-healing-mysql-0 to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-dd2p
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:00 +0000 UTC Normal Pod operator-self-healing-orc-0 Scheduled Successfully assigned kuttl-test-creative-silkworm/operator-self-healing-orc-0 to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-zvbd
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:00 +0000 UTC Normal StatefulSet.apps operator-self-healing-orc SuccessfulCreate create Pod operator-self-healing-orc-0 in StatefulSet operator-self-healing-orc successful
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:01 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:01 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 379.713958ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:01 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.initContainers{orc-init} Created Created container orc-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:02 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.initContainers{orc-init} Started Started container orc-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:04 +0000 UTC Normal Pod operator-self-healing-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-58307f52-a00d-4d01-b36a-ce5118d6da81"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:04 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:04 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 361.067734ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:04 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{orc} Created Created container orc
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:05 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{orc} Started Started container orc
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:05 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:05 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 360.719022ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:05 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{mysql-monit} Created Created container mysql-monit
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:05 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:06 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:07 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 482.469169ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:07 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.initContainers{mysql-init} Created Created container mysql-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:07 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:09 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:09 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 346.736121ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:09 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{mysql} Created Created container mysql
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:09 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{mysql} Started Started container mysql
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:09 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:10 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 364.516696ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:10 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{xtrabackup} Created Created container xtrabackup
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:10 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:10 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:10 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 601.586263ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:10 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{pt-heartbeat} Created Created container pt-heartbeat
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:11 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:36 +0000 UTC Normal Pod operator-self-healing-orc-1 Scheduled Successfully assigned kuttl-test-creative-silkworm/operator-self-healing-orc-1 to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-sbvs
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:36 +0000 UTC Normal StatefulSet.apps operator-self-healing-orc SuccessfulCreate create Pod operator-self-healing-orc-1 in StatefulSet operator-self-healing-orc successful
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:37 +0000 UTC Warning Pod operator-self-healing-orc-1 FailedMount MountVolume.SetUp failed for volume "config" : failed to sync configmap cache: timed out waiting for the condition
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:37 +0000 UTC Warning Pod operator-self-healing-orc-1 FailedMount MountVolume.SetUp failed for volume "tls" : failed to sync secret cache: timed out waiting for the condition
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:37 +0000 UTC Warning Pod operator-self-healing-orc-1 FailedMount MountVolume.SetUp failed for volume "users" : failed to sync secret cache: timed out waiting for the condition
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:38 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:38 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 373.789729ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:38 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.initContainers{orc-init} Created Created container orc-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:39 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.initContainers{orc-init} Started Started container orc-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:40 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:40 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-1 ExternalProvisioning waiting for a volume to be created, either by external provisioner "pd.csi.storage.gke.io" or manually created by system administrator
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:40 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-creative-silkworm/datadir-operator-self-healing-mysql-1"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:40 +0000 UTC Normal StatefulSet.apps operator-self-healing-mysql SuccessfulCreate create Claim datadir-operator-self-healing-mysql-1 Pod operator-self-healing-mysql-1 in StatefulSet operator-self-healing-mysql success
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:40 +0000 UTC Normal StatefulSet.apps operator-self-healing-mysql SuccessfulCreate create Pod operator-self-healing-mysql-1 in StatefulSet operator-self-healing-mysql successful
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:40 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:40 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 385.937682ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:41 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{orc} Created Created container orc
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:41 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{orc} Started Started container orc
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:41 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:41 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 432.918365ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:41 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{mysql-monit} Created Created container mysql-monit
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:41 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:43 +0000 UTC Normal Pod operator-self-healing-haproxy-0 Scheduled Successfully assigned kuttl-test-creative-silkworm/operator-self-healing-haproxy-0 to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-sbvs
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:43 +0000 UTC Normal StatefulSet.apps operator-self-healing-haproxy SuccessfulCreate create Pod operator-self-healing-haproxy-0 in StatefulSet operator-self-healing-haproxy successful
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:44 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-d63f5678-cdc3-4416-8b5f-3a43c25d6bbd
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:44 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:44 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 331.620841ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:44 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.initContainers{haproxy-init} Created Created container haproxy-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:44 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:44 +0000 UTC Normal Pod operator-self-healing-mysql-1 Scheduled Successfully assigned kuttl-test-creative-silkworm/operator-self-healing-mysql-1 to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-zvbd
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:45 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:45 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 356.922727ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:46 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{haproxy} Created Created container haproxy
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:46 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{haproxy} Started Started container haproxy
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:46 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:46 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 411.353731ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:46 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{mysql-monit} Created Created container mysql-monit
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:46 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:46 +0000 UTC Normal Pod operator-self-healing-haproxy-1 Scheduled Successfully assigned kuttl-test-creative-silkworm/operator-self-healing-haproxy-1 to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-dd2p
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:46 +0000 UTC Normal StatefulSet.apps operator-self-healing-haproxy SuccessfulCreate create Pod operator-self-healing-haproxy-1 in StatefulSet operator-self-healing-haproxy successful
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:47 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:47 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 386.472999ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:47 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.initContainers{haproxy-init} Created Created container haproxy-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:48 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:49 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:49 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 380.491681ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:49 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{haproxy} Created Created container haproxy
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:49 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{haproxy} Started Started container haproxy
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:49 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:50 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 399.399476ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:50 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{mysql-monit} Created Created container mysql-monit
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:50 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:50 +0000 UTC Normal Pod operator-self-healing-haproxy-2 Scheduled Successfully assigned kuttl-test-creative-silkworm/operator-self-healing-haproxy-2 to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-zvbd
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:50 +0000 UTC Normal StatefulSet.apps operator-self-healing-haproxy SuccessfulCreate create Pod operator-self-healing-haproxy-2 in StatefulSet operator-self-healing-haproxy successful
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:51 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:51 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 401.148172ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:51 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.initContainers{haproxy-init} Created Created container haproxy-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:51 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:52 +0000 UTC Normal Pod operator-self-healing-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-d63f5678-cdc3-4416-8b5f-3a43c25d6bbd"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:53 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:54 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 396.402367ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:54 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{haproxy} Created Created container haproxy
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:54 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{haproxy} Started Started container haproxy
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:54 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:54 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 652.554926ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:54 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{mysql-monit} Created Created container mysql-monit
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:54 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:54 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 411.661689ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:54 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.initContainers{mysql-init} Created Created container mysql-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:54 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:55 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:55 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:56 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 368.728683ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:56 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{mysql} Created Created container mysql
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:56 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{mysql} Started Started container mysql
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:56 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:56 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 379.792026ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:56 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{xtrabackup} Created Created container xtrabackup
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:56 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:56 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:57 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 386.840395ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:57 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{pt-heartbeat} Created Created container pt-heartbeat
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:57:57 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:13 +0000 UTC Normal Pod operator-self-healing-orc-2 Scheduled Successfully assigned kuttl-test-creative-silkworm/operator-self-healing-orc-2 to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-sbvs
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:13 +0000 UTC Normal StatefulSet.apps operator-self-healing-orc SuccessfulCreate create Pod operator-self-healing-orc-2 in StatefulSet operator-self-healing-orc successful
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:14 +0000 UTC Warning Pod operator-self-healing-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed:
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:14 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:14 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:14 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 385.579709ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:14 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.initContainers{orc-init} Created Created container orc-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:14 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.initContainers{orc-init} Started Started container orc-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:15 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:16 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 395.486072ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:16 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{orc} Created Created container orc
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:16 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{orc} Started Started container orc
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:16 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:16 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 386.330974ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:16 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{mysql-monit} Created Created container mysql-monit
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:16 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:18 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 423.087995ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:48 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:48 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-2 ExternalProvisioning waiting for a volume to be created, either by external provisioner "pd.csi.storage.gke.io" or manually created by system administrator
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:48 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-creative-silkworm/datadir-operator-self-healing-mysql-2"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:48 +0000 UTC Normal StatefulSet.apps operator-self-healing-mysql SuccessfulCreate create Claim datadir-operator-self-healing-mysql-2 Pod operator-self-healing-mysql-2 in StatefulSet operator-self-healing-mysql success
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:48 +0000 UTC Normal StatefulSet.apps operator-self-healing-mysql SuccessfulCreate create Pod operator-self-healing-mysql-2 in StatefulSet operator-self-healing-mysql successful
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:53 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-a8da6989-595b-427d-91ff-0b260bd524d1
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:53 +0000 UTC Normal Pod operator-self-healing-mysql-2 Scheduled Successfully assigned kuttl-test-creative-silkworm/operator-self-healing-mysql-2 to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-sbvs
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:58 +0000 UTC Normal Pod operator-self-healing-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-a8da6989-595b-427d-91ff-0b260bd524d1"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:58:59 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:59:00 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 407.648583ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:59:00 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.initContainers{mysql-init} Created Created container mysql-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:59:00 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:59:01 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:59:02 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 382.065998ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:59:02 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{mysql} Created Created container mysql
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:59:02 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{mysql} Started Started container mysql
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:59:02 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:59:02 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 361.143384ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:59:02 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{xtrabackup} Created Created container xtrabackup
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:59:02 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:59:02 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:59:03 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 371.815684ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:59:03 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{pt-heartbeat} Created Created container pt-heartbeat
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:59:03 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:59:20 +0000 UTC Warning Pod operator-self-healing-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed:
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:59:20 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 14:59:24 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 382.485162ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:00:39 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-operator FinalizerInited Finalizer has been inited
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:00:39 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-operator Updated Successfully update finalizer of resource
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:00:39 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-operator Updated Successfully update desiredPhase of resource
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:00:39 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-operator Applied Successfully apply chaos for kuttl-test-creative-silkworm/percona-server-mysql-operator-6b56d66f99-jlcgp
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:00:39 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-operator Updated Successfully update records of resource
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:00:39 +0000 UTC Normal Pod percona-server-mysql-operator-6b56d66f99-jlcgp.spec.containers{manager} Killing Stopping container manager
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:00:39 +0000 UTC Normal Pod percona-server-mysql-operator-6b56d66f99-rvxgs Scheduled Successfully assigned kuttl-test-creative-silkworm/percona-server-mysql-operator-6b56d66f99-rvxgs to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-dd2p
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:00:39 +0000 UTC Normal ReplicaSet.apps percona-server-mysql-operator-6b56d66f99 SuccessfulCreate Created pod: percona-server-mysql-operator-6b56d66f99-rvxgs
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:00:42 +0000 UTC Normal Pod percona-server-mysql-operator-6b56d66f99-rvxgs.spec.containers{manager} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:00:42 +0000 UTC Normal Pod percona-server-mysql-operator-6b56d66f99-rvxgs.spec.containers{manager} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 405.298857ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:00:42 +0000 UTC Normal Pod percona-server-mysql-operator-6b56d66f99-rvxgs.spec.containers{manager} Created Created container manager
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:00:42 +0000 UTC Normal Pod percona-server-mysql-operator-6b56d66f99-rvxgs.spec.containers{manager} Started Started container manager
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:01 +0000 UTC Normal Lease.coordination.k8s.io 08db2feb.percona.com LeaderElection percona-server-mysql-operator-6b56d66f99-rvxgs_77ebab8d-6706-4bfc-8ea4-8c6ddd3de147 became leader
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:27 +0000 UTC Normal Pod operator-self-healing-haproxy-3 Scheduled Successfully assigned kuttl-test-creative-silkworm/operator-self-healing-haproxy-3 to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-dd2p
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:27 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:27 +0000 UTC Normal StatefulSet.apps operator-self-healing-haproxy SuccessfulCreate create Pod operator-self-healing-haproxy-3 in StatefulSet operator-self-healing-haproxy successful
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:28 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 401.655311ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:28 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Created Created container haproxy-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:28 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Started Started container haproxy-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:30 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:30 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 362.60858ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:30 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Created Created container haproxy
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:30 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Started Started container haproxy
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:30 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:30 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 372.491488ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:30 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Created Created container mysql-monit
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:31 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Started Started container mysql-monit
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:31 +0000 UTC Normal Pod operator-self-healing-haproxy-4 Scheduled Successfully assigned kuttl-test-creative-silkworm/operator-self-healing-haproxy-4 to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-zvbd
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:31 +0000 UTC Normal StatefulSet.apps operator-self-healing-haproxy SuccessfulCreate create Pod operator-self-healing-haproxy-4 in StatefulSet operator-self-healing-haproxy successful
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:32 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:32 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 382.145772ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:32 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Created Created container haproxy-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:32 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Started Started container haproxy-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:34 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:34 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 374.08214ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:34 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Created Created container haproxy
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:34 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Started Started container haproxy
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:34 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:35 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 503.5339ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:35 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Created Created container mysql-monit
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:35 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Started Started container mysql-monit
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:49 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator FinalizerInited Finalizer has been inited
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:49 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Updated Successfully update finalizer of resource
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:49 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Started Experiment has started
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:49 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Updated Successfully update desiredPhase of resource
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:49 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Updated Successfully update records of resource
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:49 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Applied Successfully apply chaos for kuttl-test-creative-silkworm/percona-server-mysql-operator-6b56d66f99-rvxgs
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:49 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Updated Successfully update records of resource
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:49 +0000 UTC Normal PodNetworkChaos.chaos-mesh.org percona-server-mysql-operator-6b56d66f99-rvxgs Updated Successfully update ObservedGeneration and FailedMessage of resource
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:01:51 +0000 UTC Warning Pod percona-server-mysql-operator-6b56d66f99-rvxgs.spec.containers{manager} Unhealthy Readiness probe failed: Get "http://10.6.88.29:8081/readyz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:02:01 +0000 UTC Warning Pod percona-server-mysql-operator-6b56d66f99-rvxgs.spec.containers{manager} Unhealthy Liveness probe failed: Get "http://10.6.88.29:8081/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:02:01 +0000 UTC Normal Pod percona-server-mysql-operator-6b56d66f99-rvxgs.spec.containers{manager} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 384.407718ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:02:41 +0000 UTC Normal Pod percona-server-mysql-operator-6b56d66f99-rvxgs.spec.containers{manager} Killing Container manager failed liveness probe, will be restarted
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:02:41 +0000 UTC Normal Pod percona-server-mysql-operator-6b56d66f99-rvxgs.spec.containers{manager} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 416.954577ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:02:49 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator TimeUp Time up according to the duration
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:02:49 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Updated Successfully update desiredPhase of resource
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:02:49 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Updated Successfully update records of resource
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:02:49 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Recovered Successfully recover chaos for kuttl-test-creative-silkworm/percona-server-mysql-operator-6b56d66f99-rvxgs
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:02:49 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Updated Successfully update records of resource
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:02:49 +0000 UTC Normal PodNetworkChaos.chaos-mesh.org percona-server-mysql-operator-6b56d66f99-rvxgs Updated Successfully update ObservedGeneration and FailedMessage of resource
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:03:14 +0000 UTC Normal Lease.coordination.k8s.io 08db2feb.percona.com LeaderElection percona-server-mysql-operator-6b56d66f99-rvxgs_a85276fc-2a34-4f23-a85a-eaef24297589 became leader
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:03:22 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Killing Stopping container haproxy
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:03:22 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Killing Stopping container mysql-monit
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:03:22 +0000 UTC Normal StatefulSet.apps operator-self-healing-haproxy SuccessfulDelete delete Pod operator-self-healing-haproxy-4 in StatefulSet operator-self-healing-haproxy successful
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:03:23 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Killing Stopping container haproxy
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:03:24 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Killing Stopping container mysql-monit
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:03:24 +0000 UTC Normal StatefulSet.apps operator-self-healing-haproxy SuccessfulDelete delete Pod operator-self-healing-haproxy-3 in StatefulSet operator-self-healing-haproxy successful
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:03:39 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator FinalizerInited Finalizer has been inited
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:03:39 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator Updated Successfully update finalizer of resource
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:03:39 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator Started Experiment has started
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:03:39 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator Updated Successfully update desiredPhase of resource
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:03:39 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator Applied Successfully apply chaos for kuttl-test-creative-silkworm/percona-server-mysql-operator-6b56d66f99-rvxgs
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:03:39 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator Updated Successfully update records of resource
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:03:39 +0000 UTC Normal Pod percona-server-mysql-operator-6b56d66f99-rvxgs.spec.containers{manager} Killing Container manager definition changed, will be restarted
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:04:39 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator TimeUp Time up according to the duration
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:04:39 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator Updated Successfully update desiredPhase of resource
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:04:39 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator Recovered Successfully recover chaos for kuttl-test-creative-silkworm/percona-server-mysql-operator-6b56d66f99-rvxgs
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:04:39 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator Updated Successfully update records of resource
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:04:56 +0000 UTC Normal Lease.coordination.k8s.io 08db2feb.percona.com LeaderElection percona-server-mysql-operator-6b56d66f99-rvxgs_128de603-8d05-4168-baf7-d9317d6dbddb became leader
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:10 +0000 UTC Normal Pod operator-self-healing-haproxy-3 Scheduled Successfully assigned kuttl-test-creative-silkworm/operator-self-healing-haproxy-3 to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-dd2p
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:11 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:11 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 407.43934ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:11 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Created Created container haproxy-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:11 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Started Started container haproxy-init
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:12 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:13 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 357.989824ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:13 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Created Created container haproxy
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:13 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Started Started container haproxy
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:13 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy"
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:13 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 374.105967ms
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:13 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Created Created container mysql-monit
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:13 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Started Started container mysql-monit
logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:13 +0000 UTC Normal Pod operator-self-healing-haproxy-4 Scheduled Successfully assigned
kuttl-test-creative-silkworm/operator-self-healing-haproxy-4 to gke-jen-ps-424-70568ae-7-default-pool-aeac7083-zvbd logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:14 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-424-70568ae" logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:15 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-424-70568ae" in 410.610535ms logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:15 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Created Created container haproxy-init logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:15 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Started Started container haproxy-init logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:17 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:17 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 369.199141ms logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:17 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Created Created container haproxy logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:17 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Started Started container haproxy logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:17 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:18 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 441.351966ms logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:18 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Created Created container mysql-monit logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:18 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Started Started container mysql-monit logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:40 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator FinalizerInited Finalizer has been removed logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:40 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Updated Successfully update finalizer of resource logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:42 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator FinalizerInited Finalizer has been removed logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:42 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator Updated Successfully update finalizer of resource logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:42 +0000 UTC Normal 
PodChaos.chaos-mesh.org chaos-pod-kill-operator Deleted Experiment has been deleted logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:42 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-operator Updated Successfully update desiredPhase of resource logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:42 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-operator Recovered Successfully recover chaos for kuttl-test-creative-silkworm/percona-server-mysql-operator-6b56d66f99-jlcgp logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:42 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-operator Updated Successfully update records of resource logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:42 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-operator FinalizerInited Finalizer has been removed logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:42 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-operator Updated Successfully update finalizer of resource logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:53 +0000 UTC Normal Pod chaos-controller-manager-7895996f4-jzzzf.spec.containers{chaos-mesh} Killing Stopping container chaos-mesh logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:53 +0000 UTC Normal Pod chaos-controller-manager-7895996f4-s547j.spec.containers{chaos-mesh} Killing Stopping container chaos-mesh logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:53 +0000 UTC Normal Pod chaos-controller-manager-7895996f4-wv9zn.spec.containers{chaos-mesh} Killing Stopping container chaos-mesh logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:53 +0000 UTC Normal Pod chaos-daemon-h8vkr.spec.containers{chaos-daemon} Killing Stopping container chaos-daemon logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:53 +0000 UTC Normal Pod chaos-daemon-pxbxw.spec.containers{chaos-daemon} Killing Stopping container chaos-daemon logger.go:42: 15:06:19 | operator-self-healing | 2023-08-17 15:05:54 +0000 UTC Normal Pod chaos-daemon-mhk49.spec.containers{chaos-daemon} Killing Stopping container chaos-daemon logger.go:42: 15:06:19 | operator-self-healing | Deleting namespace: kuttl-test-creative-silkworm === CONT kuttl harness.go:405: run tests finished harness.go:513: cleaning up harness.go:570: removing temp folder: "" --- PASS: kuttl (713.55s) --- PASS: kuttl/harness (0.00s) --- PASS: kuttl/harness/operator-self-healing (709.83s) PASS
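Note on the chaos experiments above: the events show three chaos-mesh faults injected against the operator pod in sequence: a NetworkChaos network-loss experiment (Started 15:01:49, TimeUp 15:02:49, i.e. a 60s duration), a PodChaos pod-failure experiment (Started 15:03:39, TimeUp 15:04:39, also 60s), and a PodChaos pod-kill experiment, each followed by the operator re-acquiring leader election and the HAProxy StatefulSet being reconciled. The manifests the test actually applies live under e2e-tests/conf and are not reproduced in this log; the sketch below is an illustrative reconstruction assembled only from the resource kinds, object names, and durations visible in the events. The label selector, mode, and loss percentage are assumptions, not values taken from the test.

    kubectl apply -n kuttl-test-creative-silkworm -f - <<EOF
    # Illustrative sketch only -- not the test suite's actual conf files.
    apiVersion: chaos-mesh.org/v1alpha1
    kind: NetworkChaos
    metadata:
      name: chaos-pod-network-loss-operator
    spec:
      action: loss                 # drop packets to/from the selected pod
      mode: all
      selector:
        labelSelector:
          app.kubernetes.io/name: percona-server-mysql-operator   # assumed label
      loss:
        loss: "100"                # assumed percentage
        correlation: "100"
      duration: "60s"              # matches Started 15:01:49 -> TimeUp 15:02:49
    ---
    apiVersion: chaos-mesh.org/v1alpha1
    kind: PodChaos
    metadata:
      name: chaos-pod-failure-operator
    spec:
      action: pod-failure          # make the pod unavailable for the duration
      mode: all
      selector:
        labelSelector:
          app.kubernetes.io/name: percona-server-mysql-operator   # assumed label
      duration: "60s"              # matches Started 15:03:39 -> TimeUp 15:04:39
    ---
    apiVersion: chaos-mesh.org/v1alpha1
    kind: PodChaos
    metadata:
      name: chaos-pod-kill-operator
    spec:
      action: pod-kill             # kill the pod once; no duration needed
      mode: all
      selector:
        labelSelector:
          app.kubernetes.io/name: percona-server-mysql-operator   # assumed label
    EOF

The overall PASS lines confirm the intent of the test: after each injected fault the operator container restarts (the liveness-probe Killing event), a new instance wins the 08db2feb.percona.com leader-election Lease, and the deliberately perturbed HAProxy StatefulSet is scaled back to its desired replica count.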