=== RUN   kuttl
    harness.go:462: starting setup
    harness.go:252: running tests using configured kubeconfig.
    harness.go:275: Successful connection to cluster at: https://34.173.52.98
    harness.go:360: running tests
    harness.go:73: going to run test suite with timeout of 180 seconds for each step
    harness.go:372: testsuite: e2e-tests/tests has 30 tests
=== RUN   kuttl/harness
=== RUN   kuttl/harness/operator-self-healing
=== PAUSE kuttl/harness/operator-self-healing
=== CONT  kuttl/harness/operator-self-healing
logger.go:42: 14:43:41 | operator-self-healing | Creating namespace: kuttl-test-modest-squid
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | starting test step 0-deploy-operator
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | running command: [sh -c set -o errexit
    set -o xtrace
    source ../../functions
    init_temp_dir # do this only in the first TestStep
    deploy_operator
    deploy_non_tls_cluster_secrets
    deploy_tls_cluster_secrets
    deploy_client]
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | + source ../../functions
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ realpath ../../..
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | ++++ pwd
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/tests/operator-self-healing
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | ++ test_name=operator-self-healing
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/vars.sh
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | ++++ git rev-parse --abbrev-ref HEAD
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ export GIT_BRANCH=PR-523
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ GIT_BRANCH=PR-523
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ export VERSION=PR-523-f00253e
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ VERSION=PR-523-f00253e
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ export PMM_SERVER_VERSION=9.9.9
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ PMM_SERVER_VERSION=9.9.9
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | ++++ which gdate
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-523/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin)
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | ++++ which date
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ date=/usr/bin/date
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ command -v oc
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ kubectl get nodes
logger.go:42: 14:43:41 | operator-self-healing/0-deploy-operator | +++ grep '^minikube'
logger.go:42: 14:43:42 | operator-self-healing/0-deploy-operator | + init_temp_dir
logger.go:42: 14:43:42 | operator-self-healing/0-deploy-operator | + rm -rf /tmp/kuttl/ps/operator-self-healing
logger.go:42: 14:43:42 | operator-self-healing/0-deploy-operator | + mkdir -p /tmp/kuttl/ps/operator-self-healing
logger.go:42: 14:43:42 | operator-self-healing/0-deploy-operator | + deploy_operator
logger.go:42: 14:43:42 | operator-self-healing/0-deploy-operator | + kubectl -n kuttl-test-modest-squid apply --server-side --force-conflicts -f /mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy/crd.yaml
logger.go:42: 14:43:43 | operator-self-healing/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlbackups.ps.percona.com serverside-applied
logger.go:42: 14:43:43 | operator-self-healing/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqlrestores.ps.percona.com serverside-applied
logger.go:42: 14:43:44 | operator-self-healing/0-deploy-operator | customresourcedefinition.apiextensions.k8s.io/perconaservermysqls.ps.percona.com serverside-applied
logger.go:42: 14:43:44 | operator-self-healing/0-deploy-operator | + kubectl -n kuttl-test-modest-squid apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy/rbac.yaml
logger.go:42: 14:43:45 | operator-self-healing/0-deploy-operator | serviceaccount/percona-server-mysql-operator created
logger.go:42: 14:43:45 | operator-self-healing/0-deploy-operator | serviceaccount/percona-server-mysql-operator-orchestrator created
logger.go:42: 14:43:45 | operator-self-healing/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 14:43:46 | operator-self-healing/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator created
logger.go:42: 14:43:46 | operator-self-healing/0-deploy-operator | role.rbac.authorization.k8s.io/percona-server-mysql-operator-orchestrator created
logger.go:42: 14:43:46 | operator-self-healing/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-leaderelection created
logger.go:42: 14:43:47 | operator-self-healing/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator created
logger.go:42: 14:43:47 | operator-self-healing/0-deploy-operator | rolebinding.rbac.authorization.k8s.io/percona-server-mysql-operator-orchestrator created
logger.go:42: 14:43:47 | operator-self-healing/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"'
logger.go:42: 14:43:47 | operator-self-healing/0-deploy-operator | + yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"'
logger.go:42: 14:43:47 | operator-self-healing/0-deploy-operator | + kubectl -n kuttl-test-modest-squid apply -f -
logger.go:42: 14:43:47 | operator-self-healing/0-deploy-operator | ++ printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' perconalab/percona-server-mysql-operator:PR-523-f00253e
logger.go:42: 14:43:47 | operator-self-healing/0-deploy-operator | + yq eval 'select(documentIndex==1).spec.template.spec.containers[0].image="perconalab/percona-server-mysql-operator:PR-523-f00253e"' /mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy/operator.yaml
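
Taken together, the xtrace lines above imply the following shape for deploy_operator. This is a sketch reconstructed from the log, not the function's actual source in e2e-tests/functions; the real file may compose the pipeline differently:

    # Sketch of deploy_operator, reconstructed from the xtrace above (assumption).
    deploy_operator() {
        kubectl -n "${NAMESPACE}" apply --server-side --force-conflicts -f "${DEPLOY_DIR}/crd.yaml"
        kubectl -n "${NAMESPACE}" apply -f "${DEPLOY_DIR}/rbac.yaml"

        # Patch operator.yaml in-stream: set the PR image on the manager container,
        # disable telemetry, raise the log level, then apply the rendered manifest.
        yq eval "$(printf 'select(documentIndex==1).spec.template.spec.containers[0].image="%s"' "${IMAGE}")" "${DEPLOY_DIR}/operator.yaml" \
            | yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="DISABLE_TELEMETRY").value) = "true"' - \
            | yq eval '(select(documentIndex==1).spec.template.spec.containers[] | select(.name=="manager").env[] | select(.name=="LOG_LEVEL").value) = "DEBUG"' - \
            | kubectl -n "${NAMESPACE}" apply -f -
    }
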
logger.go:42: 14:43:48 | operator-self-healing/0-deploy-operator | configmap/percona-server-mysql-operator-config created
logger.go:42: 14:43:49 | operator-self-healing/0-deploy-operator | deployment.apps/percona-server-mysql-operator created
logger.go:42: 14:43:49 | operator-self-healing/0-deploy-operator | + deploy_non_tls_cluster_secrets
logger.go:42: 14:43:49 | operator-self-healing/0-deploy-operator | + kubectl -n kuttl-test-modest-squid apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf/secrets.yaml
logger.go:42: 14:43:49 | operator-self-healing/0-deploy-operator | secret/test-secrets created
logger.go:42: 14:43:49 | operator-self-healing/0-deploy-operator | + deploy_tls_cluster_secrets
logger.go:42: 14:43:49 | operator-self-healing/0-deploy-operator | + kubectl -n kuttl-test-modest-squid apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf/ssl-secret.yaml
logger.go:42: 14:43:50 | operator-self-healing/0-deploy-operator | secret/test-ssl created
logger.go:42: 14:43:50 | operator-self-healing/0-deploy-operator | + deploy_client
logger.go:42: 14:43:50 | operator-self-healing/0-deploy-operator | + kubectl -n kuttl-test-modest-squid apply -f /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf/client.yaml
logger.go:42: 14:43:51 | operator-self-healing/0-deploy-operator | pod/mysql-client created
logger.go:42: 14:43:52 | operator-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 14:43:52 | operator-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 14:43:53 | operator-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 14:43:54 | operator-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 14:43:54 | operator-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 14:43:55 | operator-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 14:43:56 | operator-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 14:43:56 | operator-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 14:43:56 | operator-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
logger.go:42: 14:43:58 | operator-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 14:43:58 | operator-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 14:43:58 | operator-self-healing/0-deploy-operator | ASSERT FAIL Resource(s) not found.
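
The repeated ASSERT FAIL results above are expected: the harness keeps re-running the check until it passes or the step's 180-second timeout expires, and the operator Deployment needs a few seconds to pull its image and become ready. Written out as an explicit loop (a sketch of the observed behavior, not of kuttl's implementation; kubectl assert is the kubectl-assert krew plugin invoked by the command itself):

    # Poll until the operator Deployment reports one ready replica.
    until kubectl assert exist-enhanced deployment percona-server-mysql-operator \
            -n "${OPERATOR_NS:-$NAMESPACE}" --field-selector status.readyReplicas=1; do
        sleep 2
    done
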
logger.go:42: 14:43:59 | operator-self-healing/0-deploy-operator | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 14:43:59 | operator-self-healing/0-deploy-operator | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 14:44:00 | operator-self-healing/0-deploy-operator | INFO Found 1 resource(s).
logger.go:42: 14:44:00 | operator-self-healing/0-deploy-operator | NAME                            NAMESPACE                 COL0
logger.go:42: 14:44:00 | operator-self-healing/0-deploy-operator | percona-server-mysql-operator   kuttl-test-modest-squid   1
logger.go:42: 14:44:00 | operator-self-healing/0-deploy-operator | ASSERT PASS
logger.go:42: 14:44:00 | operator-self-healing/0-deploy-operator | test step completed 0-deploy-operator
logger.go:42: 14:44:00 | operator-self-healing/1-deploy-chaos-mesh | starting test step 1-deploy-chaos-mesh
logger.go:42: 14:44:00 | operator-self-healing/1-deploy-chaos-mesh | running command: [sh -c set -o errexit
    set -o xtrace
    source ../../functions
    deploy_chaos_mesh]
logger.go:42: 14:44:00 | operator-self-healing/1-deploy-chaos-mesh | + source ../../functions
logger.go:42: 14:44:00 | operator-self-healing/1-deploy-chaos-mesh | + deploy_chaos_mesh
logger.go:42: 14:44:00 | operator-self-healing/1-deploy-chaos-mesh | + destroy_chaos_mesh
logger.go:42: 14:44:00 | operator-self-healing/1-deploy-chaos-mesh | ++ helm list --all-namespaces --filter chaos-mesh
logger.go:42: 14:44:00 | operator-self-healing/1-deploy-chaos-mesh | ++ tail -n1
logger.go:42: 14:44:00 | operator-self-healing/1-deploy-chaos-mesh | ++ sed s/NAMESPACE//
logger.go:42: 14:44:00 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '-F ' '{print $2}'
logger.go:42: 14:44:00 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-523/kubeconfig
logger.go:42: 14:44:00 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-523/kubeconfig
logger.go:42: 14:44:01 | operator-self-healing/1-deploy-chaos-mesh | + local chaos_mesh_ns=
logger.go:42: 14:44:01 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl api-resources
logger.go:42: 14:44:01 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
logger.go:42: 14:44:01 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 14:44:02 | operator-self-healing/1-deploy-chaos-mesh | + '[' -n '' ']'
logger.go:42: 14:44:02 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get crd
logger.go:42: 14:44:02 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh.org
logger.go:42: 14:44:02 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 14:44:02 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete crd
logger.go:42: 14:44:02 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
logger.go:42: 14:44:02 | operator-self-healing/1-deploy-chaos-mesh | + :
logger.go:42: 14:44:02 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get clusterrolebinding
logger.go:42: 14:44:02 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
logger.go:42: 14:44:02 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 14:44:03 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete clusterrolebinding
logger.go:42: 14:44:03 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
logger.go:42: 14:44:03 | operator-self-healing/1-deploy-chaos-mesh | + :
logger.go:42: 14:44:03 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get clusterrole
logger.go:42: 14:44:03 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
logger.go:42: 14:44:03 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 14:44:03 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete clusterrole
logger.go:42: 14:44:03 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
logger.go:42: 14:44:03 | operator-self-healing/1-deploy-chaos-mesh | + :
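
Each delete/error/`+ :` triple above (and the webhook cleanup that follows) is the same best-effort idiom; one iteration, reconstructed from the xtrace (a sketch, the real destroy_chaos_mesh may differ in detail):

    # On a clean cluster, grep matches nothing, so kubectl delete is invoked without
    # any resource name and fails with "no name was specified"; '|| :' swallows the
    # error so cleanup can proceed to the next resource kind.
    timeout 30 kubectl delete crd $(kubectl get crd | grep 'chaos-mesh.org' | awk '{print $1}') || :
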
logger.go:42: 14:44:03 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get MutatingWebhookConfiguration
logger.go:42: 14:44:03 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
logger.go:42: 14:44:03 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 14:44:04 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete MutatingWebhookConfiguration
logger.go:42: 14:44:04 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
logger.go:42: 14:44:04 | operator-self-healing/1-deploy-chaos-mesh | + :
logger.go:42: 14:44:04 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration
logger.go:42: 14:44:04 | operator-self-healing/1-deploy-chaos-mesh | ++ grep chaos-mesh
logger.go:42: 14:44:04 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 14:44:04 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration
logger.go:42: 14:44:04 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
logger.go:42: 14:44:04 | operator-self-healing/1-deploy-chaos-mesh | + :
logger.go:42: 14:44:04 | operator-self-healing/1-deploy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration
logger.go:42: 14:44:04 | operator-self-healing/1-deploy-chaos-mesh | ++ grep validate-auth
logger.go:42: 14:44:04 | operator-self-healing/1-deploy-chaos-mesh | ++ awk '{print $1}'
logger.go:42: 14:44:05 | operator-self-healing/1-deploy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration
logger.go:42: 14:44:05 | operator-self-healing/1-deploy-chaos-mesh | error: resource(s) were provided, but no name was specified
logger.go:42: 14:44:05 | operator-self-healing/1-deploy-chaos-mesh | + :
logger.go:42: 14:44:05 | operator-self-healing/1-deploy-chaos-mesh | + helm repo add chaos-mesh https://charts.chaos-mesh.org
logger.go:42: 14:44:05 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-523/kubeconfig
logger.go:42: 14:44:05 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-523/kubeconfig
logger.go:42: 14:44:05 | operator-self-healing/1-deploy-chaos-mesh | "chaos-mesh" has been added to your repositories
logger.go:42: 14:44:05 | operator-self-healing/1-deploy-chaos-mesh | + '[' -n '' ']'
logger.go:42: 14:44:05 | operator-self-healing/1-deploy-chaos-mesh | + helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=kuttl-test-modest-squid --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.5.1
logger.go:42: 14:44:05 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-523/kubeconfig
logger.go:42: 14:44:05 | operator-self-healing/1-deploy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-523/kubeconfig
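
Condensed, the install performed above is (chart values and version exactly as shown in the log; only the namespace is generalized to the test variable):

    helm repo add chaos-mesh https://charts.chaos-mesh.org
    helm install chaos-mesh chaos-mesh/chaos-mesh \
        --namespace="${NAMESPACE}" \
        --set chaosDaemon.runtime=containerd \
        --set chaosDaemon.socketPath=/run/containerd/containerd.sock \
        --set dashboard.create=false \
        --version 2.5.1
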
logger.go:42: 14:44:34 | operator-self-healing/1-deploy-chaos-mesh | NAME: chaos-mesh
logger.go:42: 14:44:34 | operator-self-healing/1-deploy-chaos-mesh | LAST DEPLOYED: Mon Feb 5 14:44:21 2024
logger.go:42: 14:44:34 | operator-self-healing/1-deploy-chaos-mesh | NAMESPACE: kuttl-test-modest-squid
logger.go:42: 14:44:34 | operator-self-healing/1-deploy-chaos-mesh | STATUS: deployed
logger.go:42: 14:44:34 | operator-self-healing/1-deploy-chaos-mesh | REVISION: 1
logger.go:42: 14:44:34 | operator-self-healing/1-deploy-chaos-mesh | TEST SUITE: None
logger.go:42: 14:44:34 | operator-self-healing/1-deploy-chaos-mesh | NOTES:
logger.go:42: 14:44:34 | operator-self-healing/1-deploy-chaos-mesh | 1. Make sure chaos-mesh components are running
logger.go:42: 14:44:34 | operator-self-healing/1-deploy-chaos-mesh | kubectl get pods --namespace kuttl-test-modest-squid -l app.kubernetes.io/instance=chaos-mesh
logger.go:42: 14:44:34 | operator-self-healing/1-deploy-chaos-mesh | + sleep 10
logger.go:42: 14:44:47 | operator-self-healing/1-deploy-chaos-mesh | test step completed 1-deploy-chaos-mesh
logger.go:42: 14:44:47 | operator-self-healing/2-create-cluster | starting test step 2-create-cluster
logger.go:42: 14:44:47 | operator-self-healing/2-create-cluster | running command: [sh -c set -o errexit
    set -o xtrace
    source ../../functions
    get_cr \
        | yq eval '.spec.mysql.clusterType="async"' - \
        | yq eval '.spec.mysql.size=3' - \
        | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \
        | yq eval '.spec.proxy.haproxy.enabled=true' - \
        | yq eval '.spec.proxy.haproxy.size=3' - \
        | yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - \
        | yq eval '.spec.orchestrator.enabled=true' - \
        | yq eval '.spec.orchestrator.size=3' - \
        | yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \
        | kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 14:44:47 | operator-self-healing/2-create-cluster | + source ../../functions
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + get_cr
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + local name_suffix=
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval .spec.mysql.size=3 -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval '.spec.sslSecretName="test-ssl"' -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval .spec.orchestrator.enabled=true -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval .spec.orchestrator.size=3 -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + kubectl -n kuttl-test-modest-squid apply -f -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval '.spec.upgradeOptions.apply="disabled"' -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval .spec.proxy.haproxy.enabled=true -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval .spec.proxy.haproxy.size=3 -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval '.spec.mysql.clusterType="async"' -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | ++ printf '.metadata.name="%s"' operator-self-healing
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval '.spec.secretsName="test-secrets"' -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval '.metadata.name="operator-self-healing"' /mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy/cr.yaml
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + '[' -n '' ']'
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-523-f00253e
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-523-f00253e"' -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:dev-latest
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval '.spec.pmm.image="perconalab/pmm-client:dev-latest"' -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' -
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy
logger.go:42: 14:44:48 | operator-self-healing/2-create-cluster | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' -
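
The interleaved yq lines above are one pipeline whose stages are traced out of order; de-interleaved, get_cr renders deploy/cr.yaml with the test-wide overrides, and the test step then pipes its own settings in afterwards. A sketch of the equivalent sequential form (reconstructed from the xtrace; the real function may order the expressions differently):

    get_cr() {
        yq eval "$(printf '.metadata.name="%s"' "${test_name}")" "${DEPLOY_DIR}/cr.yaml" \
            | yq eval '.spec.secretsName="test-secrets"' - \
            | yq eval '.spec.sslSecretName="test-ssl"' - \
            | yq eval '.spec.upgradeOptions.apply="disabled"' - \
            | yq eval "$(printf '.spec.initImage="%s"' "${IMAGE}")" - \
            | yq eval "$(printf '.spec.mysql.image="%s"' "${IMAGE_MYSQL}")" - \
            | yq eval "$(printf '.spec.backup.image="%s"' "${IMAGE_BACKUP}")" - \
            | yq eval "$(printf '.spec.orchestrator.image="%s"' "${IMAGE_ORCHESTRATOR}")" - \
            | yq eval "$(printf '.spec.proxy.router.image="%s"' "${IMAGE_ROUTER}")" - \
            | yq eval "$(printf '.spec.proxy.haproxy.image="%s"' "${IMAGE_HAPROXY}")" - \
            | yq eval "$(printf '.spec.toolkit.image="%s"' "${IMAGE_TOOLKIT}")" - \
            | yq eval "$(printf '.spec.pmm.image="%s"' "${IMAGE_PMM_CLIENT}")" -
    }
    # The step itself then appends clusterType, size and affinity overrides and pipes
    # the result to 'kubectl -n "${NAMESPACE}" apply -f -', as in the running command above.
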
logger.go:42: 14:44:49 | operator-self-healing/2-create-cluster | perconaservermysql.ps.percona.com/operator-self-healing created
logger.go:42: 14:48:08 | operator-self-healing/2-create-cluster | test step completed 2-create-cluster
logger.go:42: 14:48:08 | operator-self-healing/3-write-data | starting test step 3-write-data
logger.go:42: 14:48:08 | operator-self-healing/3-write-data | running command: [sh -c set -o errexit
    set -o xtrace
    source ../../functions
    run_mysql \
        "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" \
        "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password"
    run_mysql \
        "INSERT myDB.myTable (id) VALUES (100500)" \
        "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password"]
logger.go:42: 14:48:08 | operator-self-healing/3-write-data | + source ../../functions
logger.go:42: 14:48:09 | operator-self-healing/3-write-data | +++ get_cluster_name
logger.go:42: 14:48:09 | operator-self-healing/3-write-data | +++ kubectl -n kuttl-test-modest-squid get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 14:48:09 | operator-self-healing/3-write-data | ++ get_haproxy_svc operator-self-healing
logger.go:42: 14:48:09 | operator-self-healing/3-write-data | ++ local cluster=operator-self-healing
logger.go:42: 14:48:09 | operator-self-healing/3-write-data | ++ echo operator-self-healing-haproxy
logger.go:42: 14:48:09 | operator-self-healing/3-write-data | + run_mysql 'CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)' '-h operator-self-healing-haproxy -uroot -proot_password'
logger.go:42: 14:48:09 | operator-self-healing/3-write-data | + local 'command=CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)'
logger.go:42: 14:48:09 | operator-self-healing/3-write-data | + local 'uri=-h operator-self-healing-haproxy -uroot -proot_password'
logger.go:42: 14:48:09 | operator-self-healing/3-write-data | + local pod=
logger.go:42: 14:48:09 | operator-self-healing/3-write-data | ++ get_client_pod
logger.go:42: 14:48:09 | operator-self-healing/3-write-data | ++ kubectl -n kuttl-test-modest-squid get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 14:48:10 | operator-self-healing/3-write-data | + client_pod=mysql-client
logger.go:42: 14:48:10 | operator-self-healing/3-write-data | + wait_pod mysql-client
logger.go:42: 14:48:10 | operator-self-healing/3-write-data | + local pod=mysql-client
logger.go:42: 14:48:10 | operator-self-healing/3-write-data | + set +o xtrace
logger.go:42: 14:48:10 | operator-self-healing/3-write-data | mysql-clienttrue
logger.go:42: 14:48:10 | operator-self-healing/3-write-data | + kubectl -n kuttl-test-modest-squid exec mysql-client -- bash -c 'printf '\''%s\n'\'' "CREATE DATABASE IF NOT EXISTS myDB; CREATE TABLE IF NOT EXISTS myDB.myTable (id int PRIMARY KEY)" | mysql -sN -h operator-self-healing-haproxy -uroot -proot_password'
logger.go:42: 14:48:10 | operator-self-healing/3-write-data | + sed -e 's/mysql: //'
logger.go:42: 14:48:10 | operator-self-healing/3-write-data | + grep -v 'Using a password on the command line interface can be insecure.'
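
From the xtrace, run_mysql reduces to the following (a sketch; wait_pod's body is hidden behind 'set +o xtrace', so that call's behavior is an assumption based on its name):

    run_mysql() {
        local command="$1"
        local uri="$2"
        local client_pod
        client_pod=$(kubectl -n "${NAMESPACE}" get pods --selector=name=mysql-client \
            -o 'jsonpath={.items[].metadata.name}')
        wait_pod "${client_pod}"  # not traced; presumably blocks until the pod is Ready
        # Run the statement through the client pod and strip the password warning.
        kubectl -n "${NAMESPACE}" exec "${client_pod}" -- \
            bash -c "printf '%s\n' \"${command}\" | mysql -sN ${uri}" \
            | sed -e 's/mysql: //' \
            | grep -v 'Using a password on the command line interface can be insecure.'
    }
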
logger.go:42: 14:48:11 | operator-self-healing/3-write-data | + :
logger.go:42: 14:48:11 | operator-self-healing/3-write-data | +++ get_cluster_name
logger.go:42: 14:48:11 | operator-self-healing/3-write-data | +++ kubectl -n kuttl-test-modest-squid get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 14:48:12 | operator-self-healing/3-write-data | ++ get_haproxy_svc operator-self-healing
logger.go:42: 14:48:12 | operator-self-healing/3-write-data | ++ local cluster=operator-self-healing
logger.go:42: 14:48:12 | operator-self-healing/3-write-data | ++ echo operator-self-healing-haproxy
logger.go:42: 14:48:12 | operator-self-healing/3-write-data | + run_mysql 'INSERT myDB.myTable (id) VALUES (100500)' '-h operator-self-healing-haproxy -uroot -proot_password'
logger.go:42: 14:48:12 | operator-self-healing/3-write-data | + local 'command=INSERT myDB.myTable (id) VALUES (100500)'
logger.go:42: 14:48:12 | operator-self-healing/3-write-data | + local 'uri=-h operator-self-healing-haproxy -uroot -proot_password'
logger.go:42: 14:48:12 | operator-self-healing/3-write-data | + local pod=
logger.go:42: 14:48:12 | operator-self-healing/3-write-data | ++ get_client_pod
logger.go:42: 14:48:12 | operator-self-healing/3-write-data | ++ kubectl -n kuttl-test-modest-squid get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 14:48:12 | operator-self-healing/3-write-data | + client_pod=mysql-client
logger.go:42: 14:48:12 | operator-self-healing/3-write-data | + wait_pod mysql-client
logger.go:42: 14:48:12 | operator-self-healing/3-write-data | + local pod=mysql-client
logger.go:42: 14:48:12 | operator-self-healing/3-write-data | + set +o xtrace
logger.go:42: 14:48:13 | operator-self-healing/3-write-data | mysql-clienttrue
logger.go:42: 14:48:13 | operator-self-healing/3-write-data | + sed -e 's/mysql: //'
logger.go:42: 14:48:13 | operator-self-healing/3-write-data | + grep -v 'Using a password on the command line interface can be insecure.'
logger.go:42: 14:48:13 | operator-self-healing/3-write-data | + kubectl -n kuttl-test-modest-squid exec mysql-client -- bash -c 'printf '\''%s\n'\'' "INSERT myDB.myTable (id) VALUES (100500)" | mysql -sN -h operator-self-healing-haproxy -uroot -proot_password'
logger.go:42: 14:48:14 | operator-self-healing/3-write-data | + :
logger.go:42: 14:48:15 | operator-self-healing/3-write-data | test step completed 3-write-data
logger.go:42: 14:48:15 | operator-self-healing/4-read-from-primary | starting test step 4-read-from-primary
logger.go:42: 14:48:15 | operator-self-healing/4-read-from-primary | running command: [sh -c set -o errexit
    set -o xtrace
    source ../../functions
    data=$(run_mysql "SELECT * FROM myDB.myTable" "-h $(get_haproxy_svc $(get_cluster_name)) -uroot -proot_password")
    kubectl create configmap -n "${NAMESPACE}" 04-read-from-primary --from-literal=data="${data}"]
logger.go:42: 14:48:15 | operator-self-healing/4-read-from-primary | + source ../../functions
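
The step that just started materializes a query result into a ConfigMap; that is the kuttl idiom for asserting on dynamic data. The step's assert file then compares the live object against an expected one, plausibly something like the following (the assert file itself is not shown in this log, so its exact contents are an assumption; only the ConfigMap name and value come from the trace below):

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: 04-read-from-primary
    data:
      data: "100500"
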
logger.go:42: 14:48:15 | operator-self-healing/4-read-from-primary | ++++ get_cluster_name
logger.go:42: 14:48:15 | operator-self-healing/4-read-from-primary | ++++ kubectl -n kuttl-test-modest-squid get ps -o 'jsonpath={.items[0].metadata.name}'
logger.go:42: 14:48:15 | operator-self-healing/4-read-from-primary | +++ get_haproxy_svc operator-self-healing
logger.go:42: 14:48:15 | operator-self-healing/4-read-from-primary | +++ local cluster=operator-self-healing
logger.go:42: 14:48:15 | operator-self-healing/4-read-from-primary | +++ echo operator-self-healing-haproxy
logger.go:42: 14:48:15 | operator-self-healing/4-read-from-primary | ++ run_mysql 'SELECT * FROM myDB.myTable' '-h operator-self-healing-haproxy -uroot -proot_password'
logger.go:42: 14:48:15 | operator-self-healing/4-read-from-primary | ++ local 'command=SELECT * FROM myDB.myTable'
logger.go:42: 14:48:15 | operator-self-healing/4-read-from-primary | ++ local 'uri=-h operator-self-healing-haproxy -uroot -proot_password'
logger.go:42: 14:48:15 | operator-self-healing/4-read-from-primary | ++ local pod=
operator-self-healing/4-read-from-primary | +++ get_client_pod logger.go:42: 14:48:15 | operator-self-healing/4-read-from-primary | +++ kubectl -n kuttl-test-modest-squid get pods --selector=name=mysql-client -o 'jsonpath={.items[].metadata.name}' logger.go:42: 14:48:16 | operator-self-healing/4-read-from-primary | ++ client_pod=mysql-client logger.go:42: 14:48:16 | operator-self-healing/4-read-from-primary | ++ wait_pod mysql-client logger.go:42: 14:48:16 | operator-self-healing/4-read-from-primary | ++ local pod=mysql-client logger.go:42: 14:48:16 | operator-self-healing/4-read-from-primary | ++ set +o xtrace logger.go:42: 14:48:16 | operator-self-healing/4-read-from-primary | mysql-clienttrue logger.go:42: 14:48:16 | operator-self-healing/4-read-from-primary | ++ kubectl -n kuttl-test-modest-squid exec mysql-client -- bash -c 'printf '\''%s\n'\'' "SELECT * FROM myDB.myTable" | mysql -sN -h operator-self-healing-haproxy -uroot -proot_password' logger.go:42: 14:48:16 | operator-self-healing/4-read-from-primary | ++ sed -e 's/mysql: //' logger.go:42: 14:48:16 | operator-self-healing/4-read-from-primary | ++ grep -v 'Using a password on the command line interface can be insecure.' logger.go:42: 14:48:17 | operator-self-healing/4-read-from-primary | + data=100500 logger.go:42: 14:48:17 | operator-self-healing/4-read-from-primary | + kubectl create configmap -n kuttl-test-modest-squid 04-read-from-primary --from-literal=data=100500 logger.go:42: 14:48:18 | operator-self-healing/4-read-from-primary | configmap/04-read-from-primary created logger.go:42: 14:48:19 | operator-self-healing/4-read-from-primary | test step completed 4-read-from-primary logger.go:42: 14:48:19 | operator-self-healing/5-kill-pod | starting test step 5-kill-pod logger.go:42: 14:48:19 | operator-self-healing/5-kill-pod | running command: [sh -c set -o errexit set -o xtrace source ../../functions init_pod=$(get_operator_pod) kill_pods "${OPERATOR_NS:-$NAMESPACE}" "pod" "$init_pod" "" "operator" sleep 10 # wait a bit for pod to be killed wait_deployment percona-server-mysql-operator "${OPERATOR_NS:-$NAMESPACE}" if [ "$init_pod" == "$(get_operator_pod)" ]; then echo "operator pod was not killed! something went wrong." exit 1 fi] logger.go:42: 14:48:19 | operator-self-healing/5-kill-pod | + source ../../functions logger.go:42: 14:48:19 | operator-self-healing/5-kill-pod | +++ realpath ../../.. 
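
Step 4 reads a previously inserted row (100500) back through the cluster's HAProxy service, from inside the mysql-client pod, and records the result in a ConfigMap named after the step, which the step's assert file presumably compares against the expected value. A minimal standalone sketch of that check, assuming the namespace, client pod, and credentials seen in this run (this is a simplified stand-in for the run_mysql helper in e2e-tests/functions, not the helper itself):

#!/usr/bin/env bash
# Read a row via the HAProxy service and publish it for a kuttl assert.
set -o errexit

NAMESPACE=kuttl-test-modest-squid          # assumption: taken from this run
HAPROXY_SVC=operator-self-healing-haproxy  # <cluster-name>-haproxy

data=$(kubectl -n "$NAMESPACE" exec mysql-client -- bash -c \
    "printf '%s\n' 'SELECT * FROM myDB.myTable' | mysql -sN -h $HAPROXY_SVC -uroot -proot_password" \
    | sed -e 's/mysql: //' \
    | grep -v 'Using a password on the command line interface can be insecure.')

# kuttl's assert for this step checks the ConfigMap contents.
kubectl -n "$NAMESPACE" create configmap 04-read-from-primary \
    --from-literal=data="$data"
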
logger.go:42: 14:48:19 | operator-self-healing/5-kill-pod | starting test step 5-kill-pod
logger.go:42: 14:48:19 | operator-self-healing/5-kill-pod | running command: [sh -c set -o errexit set -o xtrace source ../../functions init_pod=$(get_operator_pod) kill_pods "${OPERATOR_NS:-$NAMESPACE}" "pod" "$init_pod" "" "operator" sleep 10 # wait a bit for pod to be killed wait_deployment percona-server-mysql-operator "${OPERATOR_NS:-$NAMESPACE}" if [ "$init_pod" == "$(get_operator_pod)" ]; then echo "operator pod was not killed! something went wrong." exit 1 fi]
logger.go:42: 14:48:19 | operator-self-healing/5-kill-pod | + source ../../functions
logger.go:42: 14:48:19 | operator-self-healing/5-kill-pod | +++ realpath ../../..
logger.go:42: 14:48:19 | operator-self-healing/5-kill-pod | [... same environment setup as above ...]
logger.go:42: 14:48:19 | operator-self-healing/5-kill-pod | ++ get_operator_pod
logger.go:42: 14:48:19 | operator-self-healing/5-kill-pod | ++ kubectl get pods -n kuttl-test-modest-squid --selector=app.kubernetes.io/name=percona-server-mysql-operator -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 14:48:20 | operator-self-healing/5-kill-pod | + init_pod=percona-server-mysql-operator-b4c599bbb-m9blw
logger.go:42: 14:48:20 | operator-self-healing/5-kill-pod | + kill_pods kuttl-test-modest-squid pod percona-server-mysql-operator-b4c599bbb-m9blw '' operator
logger.go:42: 14:48:20 | operator-self-healing/5-kill-pod | + local ns=kuttl-test-modest-squid
logger.go:42: 14:48:20 | operator-self-healing/5-kill-pod | + local selector=pod
logger.go:42: 14:48:20 | operator-self-healing/5-kill-pod | + local pod_label=percona-server-mysql-operator-b4c599bbb-m9blw
logger.go:42: 14:48:20 | operator-self-healing/5-kill-pod | + local label_value=
logger.go:42: 14:48:20 | operator-self-healing/5-kill-pod | + local chaos_suffix=operator
logger.go:42: 14:48:20 | operator-self-healing/5-kill-pod | + '[' pod == pod ']'
logger.go:42: 14:48:20 | operator-self-healing/5-kill-pod | + yq eval '
logger.go:42: 14:48:20 | operator-self-healing/5-kill-pod | .metadata.name = "chaos-pod-kill-operator" |
logger.go:42: 14:48:20 | operator-self-healing/5-kill-pod | del(.spec.selector.pods.test-namespace) |
logger.go:42: 14:48:20 | operator-self-healing/5-kill-pod | .spec.selector.pods.kuttl-test-modest-squid[0] = "percona-server-mysql-operator-b4c599bbb-m9blw"' /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf/chaos-pod-kill.yml
logger.go:42: 14:48:20 | operator-self-healing/5-kill-pod | + kubectl apply --namespace kuttl-test-modest-squid -f -
logger.go:42: 14:48:22 | operator-self-healing/5-kill-pod | podchaos.chaos-mesh.org/chaos-pod-kill-operator created
logger.go:42: 14:48:22 | operator-self-healing/5-kill-pod | + sleep 5
logger.go:42: 14:48:27 | operator-self-healing/5-kill-pod | + sleep 10
logger.go:42: 14:48:37 | operator-self-healing/5-kill-pod | + wait_deployment percona-server-mysql-operator kuttl-test-modest-squid
logger.go:42: 14:48:37 | operator-self-healing/5-kill-pod | + local name=percona-server-mysql-operator
logger.go:42: 14:48:37 | operator-self-healing/5-kill-pod | + local target_namespace=kuttl-test-modest-squid
logger.go:42: 14:48:37 | operator-self-healing/5-kill-pod | + sleep 10
logger.go:42: 14:48:47 | operator-self-healing/5-kill-pod | + set +o xtrace
logger.go:42: 14:48:48 | operator-self-healing/5-kill-pod | percona-server-mysql-operator
logger.go:42: 14:48:48 | operator-self-healing/5-kill-pod | ++ get_operator_pod
logger.go:42: 14:48:48 | operator-self-healing/5-kill-pod | ++ kubectl get pods -n kuttl-test-modest-squid --selector=app.kubernetes.io/name=percona-server-mysql-operator -o 'jsonpath={.items[].metadata.name}'
logger.go:42: 14:48:48 | operator-self-healing/5-kill-pod | + '[' percona-server-mysql-operator-b4c599bbb-m9blw == percona-server-mysql-operator-b4c599bbb-5rhlj ']'
logger.go:42: 14:48:49 | operator-self-healing/5-kill-pod | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 14:48:49 | operator-self-healing/5-kill-pod | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 14:48:50 | operator-self-healing/5-kill-pod | INFO Found 1 resource(s).
logger.go:42: 14:48:50 | operator-self-healing/5-kill-pod | NAME                           NAMESPACE                COL0
logger.go:42: 14:48:50 | operator-self-healing/5-kill-pod | percona-server-mysql-operator  kuttl-test-modest-squid  1
logger.go:42: 14:48:50 | operator-self-healing/5-kill-pod | ASSERT PASS
logger.go:42: 14:48:50 | operator-self-healing/5-kill-pod | test step completed 5-kill-pod
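
Step 5 kills the operator pod through Chaos Mesh and then proves self-healing two ways: the old pod name (-m9blw) no longer matches the current one (-5rhlj), and the Deployment reports readyReplicas=1 again. The yq edits above only set the object's name and pod selector; the rest of the manifest lives in the checked-in chaos-pod-kill.yml template, so the action and mode below are assumptions inferred from the file name, not echoed in this log. A sketch of roughly what kill_pods renders and applies, assuming Chaos Mesh is installed:

# Render-and-apply sketch of the PodChaos object (final shape assumed).
kubectl apply --namespace kuttl-test-modest-squid -f - <<'EOF'
apiVersion: chaos-mesh.org/v1alpha1
kind: PodChaos
metadata:
  name: chaos-pod-kill-operator
spec:
  action: pod-kill          # assumption, implied by chaos-pod-kill.yml
  mode: one
  selector:
    pods:
      kuttl-test-modest-squid:
        - percona-server-mysql-operator-b4c599bbb-m9blw
EOF

Targeting by explicit pod name (selector.pods is a map of namespace to pod names) is what lets the test later compare names: the ReplicaSet replaces the killed pod with a freshly named one.
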
logger.go:42: 14:48:50 | operator-self-healing/6-scale-up | starting test step 6-scale-up
logger.go:42: 14:48:50 | operator-self-healing/6-scale-up | running command: [sh -c set -o errexit set -o xtrace source ../../functions get_cr \ | yq eval '.spec.mysql.clusterType="async"' - \ | yq eval '.spec.mysql.size=3' - \ | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \ | yq eval '.spec.proxy.haproxy.enabled=true' - \ | yq eval '.spec.proxy.haproxy.size=5' - \ | yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - \ | yq eval '.spec.orchestrator.enabled=true' - \ | yq eval '.spec.orchestrator.size=3' - \ | yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \ | kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 14:48:50 | operator-self-healing/6-scale-up | + source ../../functions
logger.go:42: 14:48:50 | operator-self-healing/6-scale-up | +++ realpath ../../..
logger.go:42: 14:48:50 | operator-self-healing/6-scale-up | [... same environment setup as above ...]
logger.go:42: 14:48:50 | operator-self-healing/6-scale-up | + get_cr
logger.go:42: 14:48:50 | operator-self-healing/6-scale-up | + local name_suffix=
logger.go:42: 14:48:50 | operator-self-healing/6-scale-up | [... interleaved xtrace of the pipeline stages (every yq process starts at once): get_cr renders deploy/cr.yaml with .metadata.name="operator-self-healing", .spec.secretsName="test-secrets", .spec.sslSecretName="test-ssl", .spec.upgradeOptions.apply="disabled", and .spec.initImage plus every component image (mysql, backup, orchestrator, router, haproxy, toolkit, pmm) pinned to the IMAGE_* values above; the step's own yq stages then apply the clusterType/size/affinity edits from the command echo ...]
logger.go:42: 14:48:50 | operator-self-healing/6-scale-up | + kubectl -n kuttl-test-modest-squid apply -f -
logger.go:42: 14:48:52 | operator-self-healing/6-scale-up | perconaservermysql.ps.percona.com/operator-self-healing configured
logger.go:42: 14:49:07 | operator-self-healing/6-scale-up | test step completed 6-scale-up
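
Step 6 confirms the freshly restarted operator still reconciles spec changes by scaling HAProxy from 3 to 5. The harness chains one yq eval per edit; the same result can be had in a single yq invocation, since yq v4 (which the harness's own expressions already use) pipes assignments. A condensed, functionally equivalent sketch:

# Equivalent single-invocation form of the step's pipeline.
source ../../functions   # provides get_cr and the NAMESPACE handling
get_cr \
  | yq eval '
      .spec.mysql.clusterType = "async" |
      .spec.mysql.size = 3 |
      .spec.mysql.affinity.antiAffinityTopologyKey = "none" |
      .spec.proxy.haproxy.enabled = true |
      .spec.proxy.haproxy.size = 5 |
      .spec.proxy.haproxy.affinity.antiAffinityTopologyKey = "none" |
      .spec.orchestrator.enabled = true |
      .spec.orchestrator.size = 3 |
      .spec.orchestrator.affinity.antiAffinityTopologyKey = "none"
    ' - \
  | kubectl -n "${NAMESPACE}" apply -f -
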
"operator" sleep 30 # wait for network loss to happen] logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | + source ../../functions logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ realpath ../../.. logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | ++++ pwd logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/tests/operator-self-healing logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | ++ test_name=operator-self-healing logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/vars.sh logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ export GIT_BRANCH=PR-523 logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ GIT_BRANCH=PR-523 logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ export VERSION=PR-523-f00253e logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ VERSION=PR-523-f00253e logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ export 
IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | ++++ which gdate logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-523/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | ++++ which date logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ date=/usr/bin/date logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ command -v oc logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ kubectl get nodes logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | +++ grep '^minikube' logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | ++ get_operator_pod logger.go:42: 14:49:07 | operator-self-healing/7-network-loss | ++ kubectl get pods -n kuttl-test-modest-squid --selector=app.kubernetes.io/name=percona-server-mysql-operator -o 'jsonpath={.items[].metadata.name}' logger.go:42: 14:49:08 | operator-self-healing/7-network-loss | + network_loss kuttl-test-modest-squid percona-server-mysql-operator-b4c599bbb-5rhlj operator logger.go:42: 14:49:08 | operator-self-healing/7-network-loss | + local ns=kuttl-test-modest-squid logger.go:42: 14:49:08 | operator-self-healing/7-network-loss | + local pod=percona-server-mysql-operator-b4c599bbb-5rhlj logger.go:42: 14:49:08 | operator-self-healing/7-network-loss | + local chaos_suffix=operator logger.go:42: 14:49:08 | operator-self-healing/7-network-loss | + yq eval ' logger.go:42: 14:49:08 | operator-self-healing/7-network-loss | .metadata.name = "chaos-pod-network-loss-operator" | logger.go:42: 14:49:08 | operator-self-healing/7-network-loss | del(.spec.selector.pods.test-namespace) | logger.go:42: 14:49:08 | 
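
Step 7 targets the current operator pod (-5rhlj) with a Chaos Mesh NetworkChaos object instead of killing it. As in step 5, the log only shows the name and selector edits; the loss parameters come from the checked-in chaos-network-loss.yml template, so the action and loss percentage below are assumptions implied by the test name. A sketch of what network_loss plausibly applies:

# Render-and-apply sketch of the NetworkChaos object (final shape assumed).
kubectl apply --namespace kuttl-test-modest-squid -f - <<'EOF'
apiVersion: chaos-mesh.org/v1alpha1
kind: NetworkChaos
metadata:
  name: chaos-pod-network-loss-operator
spec:
  action: loss             # assumption, implied by chaos-network-loss.yml
  mode: one
  selector:
    pods:
      kuttl-test-modest-squid:
        - percona-server-mysql-operator-b4c599bbb-5rhlj
  loss:
    loss: "100"            # assumption: full packet loss for the target pod
EOF
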
logger.go:42: 14:49:08 | operator-self-healing/7-network-loss | + kubectl apply --namespace kuttl-test-modest-squid -f -
logger.go:42: 14:49:09 | operator-self-healing/7-network-loss | networkchaos.chaos-mesh.org/chaos-pod-network-loss-operator created
logger.go:42: 14:49:09 | operator-self-healing/7-network-loss | + sleep 5
logger.go:42: 14:49:14 | operator-self-healing/7-network-loss | + sleep 30
logger.go:42: 14:49:45 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 14:49:45 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 14:49:45 | operator-self-healing/7-network-loss | ASSERT FAIL Resource(s) not found.
logger.go:42: 14:49:47 | operator-self-healing/7-network-loss | [... the same assert is retried roughly every 1.5 seconds and keeps failing with "ASSERT FAIL Resource(s) not found." while the operator pod is cut off, from 14:49:47 through 14:50:42 ...]
logger.go:42: 14:50:43 | operator-self-healing/7-network-loss | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1]
logger.go:42: 14:50:43 | operator-self-healing/7-network-loss | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist.
logger.go:42: 14:50:44 | operator-self-healing/7-network-loss | INFO Found 1 resource(s).
logger.go:42: 14:50:44 | operator-self-healing/7-network-loss | NAME                           NAMESPACE                COL0
logger.go:42: 14:50:44 | operator-self-healing/7-network-loss | percona-server-mysql-operator  kuttl-test-modest-squid  1
logger.go:42: 14:50:44 | operator-self-healing/7-network-loss | ASSERT PASS
logger.go:42: 14:50:44 | operator-self-healing/7-network-loss | test step completed 7-network-loss
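
The PASS at 14:50:44 is kuttl re-running the step's assert command until it succeeds or the step times out; the deployment reports readyReplicas=1 again once the chaos expires and the pod's readiness probe recovers. A standalone equivalent of that polling, using only commands shown in this log (the interval and attempt cap are assumptions, not values taken from the harness):

# Hypothetical retry wrapper around the step's assert command.
retry_assert() {
    local attempts=60
    for _ in $(seq 1 "$attempts"); do
        # kubectl-assert is a krew plugin; exits 0 when the field matches.
        if kubectl assert exist-enhanced deployment percona-server-mysql-operator \
            -n "${OPERATOR_NS:-$NAMESPACE}" --field-selector status.readyReplicas=1; then
            return 0
        fi
        sleep 1.5
    done
    return 1
}
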
logger.go:42: 14:50:44 | operator-self-healing/8-scale-down | starting test step 8-scale-down
logger.go:42: 14:50:44 | operator-self-healing/8-scale-down | running command: [sh -c set -o errexit set -o xtrace source ../../functions get_cr \ | yq eval '.spec.mysql.clusterType="async"' - \ | yq eval '.spec.mysql.size=3' - \ | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \ | yq eval '.spec.proxy.haproxy.enabled=true' - \ | yq eval '.spec.proxy.haproxy.size=3' - \ | yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - \ | yq eval '.spec.orchestrator.enabled=true' - \ | yq eval '.spec.orchestrator.size=3' - \ | yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \ | kubectl -n "${NAMESPACE}" apply -f -]
logger.go:42: 14:50:44 | operator-self-healing/8-scale-down | + source ../../functions
logger.go:42: 14:50:44 | operator-self-healing/8-scale-down | +++ realpath ../../..
logger.go:42: 14:50:44 | operator-self-healing/8-scale-down | [... same environment setup as above ...]
logger.go:42: 14:50:44 | operator-self-healing/8-scale-down | + get_cr
logger.go:42: 14:50:44 | operator-self-healing/8-scale-down | + local name_suffix=
logger.go:42: 14:50:44 | operator-self-healing/8-scale-down | [... same interleaved get_cr/yq xtrace as in step 6-scale-up, with .spec.proxy.haproxy.size=3 instead of 5 ...]
logger.go:42: 14:50:44 | operator-self-healing/8-scale-down | + kubectl -n kuttl-test-modest-squid apply -f -
logger.go:42: 14:50:45 | operator-self-healing/8-scale-down | perconaservermysql.ps.percona.com/operator-self-healing configured
logger.go:42: 14:51:00 | operator-self-healing/8-scale-down | test step completed 8-scale-down
xtrace source ../../functions failure_pod "${OPERATOR_NS:-$NAMESPACE}" "$(get_operator_pod)" "operator" sleep 30 # wait for pod failure to happen] logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | + source ../../functions logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ realpath ../../.. logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | ++++ pwd logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/tests/operator-self-healing logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | ++ test_name=operator-self-healing logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/vars.sh logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ export GIT_BRANCH=PR-523 logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ GIT_BRANCH=PR-523 logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ export VERSION=PR-523-f00253e logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ VERSION=PR-523-f00253e logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:51:00 | 
operator-self-healing/9-pod-failure | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | ++++ which gdate logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-523/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | ++++ which date logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ date=/usr/bin/date logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ command -v oc logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ kubectl get nodes logger.go:42: 14:51:00 | operator-self-healing/9-pod-failure | +++ grep '^minikube' logger.go:42: 14:51:01 | operator-self-healing/9-pod-failure | ++ get_operator_pod logger.go:42: 14:51:01 | operator-self-healing/9-pod-failure | ++ kubectl get pods -n kuttl-test-modest-squid --selector=app.kubernetes.io/name=percona-server-mysql-operator -o 'jsonpath={.items[].metadata.name}' logger.go:42: 14:51:01 | operator-self-healing/9-pod-failure | + failure_pod kuttl-test-modest-squid percona-server-mysql-operator-b4c599bbb-5rhlj operator logger.go:42: 14:51:01 | operator-self-healing/9-pod-failure | + local ns=kuttl-test-modest-squid logger.go:42: 14:51:01 | operator-self-healing/9-pod-failure | + local pod=percona-server-mysql-operator-b4c599bbb-5rhlj logger.go:42: 14:51:01 | operator-self-healing/9-pod-failure | + local chaos_suffix=operator logger.go:42: 14:51:01 | operator-self-healing/9-pod-failure | + yq eval ' logger.go:42: 14:51:01 | operator-self-healing/9-pod-failure | .metadata.name = "chaos-pod-failure-operator" | logger.go:42: 14:51:01 | operator-self-healing/9-pod-failure | del(.spec.selector.pods.test-namespace) | logger.go:42: 14:51:01 | 
operator-self-healing/9-pod-failure | .spec.selector.pods.kuttl-test-modest-squid[0] = "percona-server-mysql-operator-b4c599bbb-5rhlj"' /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf/chaos-pod-failure.yml logger.go:42: 14:51:01 | operator-self-healing/9-pod-failure | + kubectl apply --namespace kuttl-test-modest-squid -f - logger.go:42: 14:51:02 | operator-self-healing/9-pod-failure | podchaos.chaos-mesh.org/chaos-pod-failure-operator created logger.go:42: 14:51:02 | operator-self-healing/9-pod-failure | + sleep 5 logger.go:42: 14:51:07 | operator-self-healing/9-pod-failure | + sleep 30 logger.go:42: 14:51:39 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 14:51:39 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 14:51:39 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 14:51:40 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 14:51:40 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 14:51:41 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 14:51:42 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 14:51:42 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 14:51:42 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 14:51:44 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 14:51:44 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 14:51:44 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 14:51:45 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 14:51:45 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 14:51:46 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 14:51:47 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 14:51:47 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. 
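For reference, the failure_pod call traced above renders e2e-tests/conf/chaos-pod-failure.yml with yq (setting metadata.name and the per-namespace pod selector, as the trace shows) and applies it as a Chaos Mesh PodChaos object. A minimal sketch of the applied manifest, assuming a typical pod-failure template — only the name and selector are visible in the log; action, mode, and duration are assumptions:

    # Sketch only: name and selector come from the yq edits in the log above;
    # action/mode/duration are assumed, since the template body is not printed.
    kubectl apply --namespace kuttl-test-modest-squid -f - <<'EOF'
    apiVersion: chaos-mesh.org/v1alpha1
    kind: PodChaos
    metadata:
      name: chaos-pod-failure-operator
    spec:
      action: pod-failure
      mode: one
      duration: '60s'
      selector:
        pods:
          kuttl-test-modest-squid:
            - percona-server-mysql-operator-b4c599bbb-5rhlj
    EOF
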
logger.go:42: 14:51:48 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 14:51:49 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 14:51:49 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 14:51:49 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 14:51:51 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 14:51:51 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 14:51:51 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 14:51:52 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 14:51:52 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 14:51:53 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 14:51:54 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 14:51:54 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 14:51:54 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 14:51:56 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 14:51:56 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 14:51:56 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 14:51:57 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 14:51:57 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 14:51:58 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 14:51:59 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 14:51:59 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. 
logger.go:42: 14:52:00 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 14:52:01 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 14:52:01 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 14:52:01 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 14:52:02 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 14:52:03 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 14:52:03 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 14:52:04 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 14:52:04 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 14:52:05 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 14:52:06 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 14:52:06 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 14:52:06 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 14:52:08 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 14:52:08 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 14:52:08 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 14:52:09 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 14:52:10 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 14:52:10 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 14:52:11 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 14:52:11 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. 
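The repeated kubectl assert entries in this stretch are the test harness re-running the step's assert command every second or two until it succeeds or the step times out: while the PodChaos is active the operator container is not ready, so the Deployment reports zero ready replicas and the assertion keeps failing until the chaos window ends. A behaviorally equivalent poll in plain kubectl (a sketch, not the kubectl-assert plugin invocation the test actually uses):

    # Wait until the operator Deployment reports one ready replica again.
    until [ "$(kubectl get deployment percona-server-mysql-operator \
        -n "${OPERATOR_NS:-$NAMESPACE}" \
        -o jsonpath='{.status.readyReplicas}')" = "1" ]; do
      sleep 2
    done
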
logger.go:42: 14:52:12 | operator-self-healing/9-pod-failure | ASSERT FAIL Resource(s) not found. logger.go:42: 14:52:13 | operator-self-healing/9-pod-failure | running command: [sh -c kubectl assert exist-enhanced deployment percona-server-mysql-operator -n ${OPERATOR_NS:-$NAMESPACE} --field-selector status.readyReplicas=1] logger.go:42: 14:52:13 | operator-self-healing/9-pod-failure | ASSERT deployment percona-server-mysql-operator matching field criteria 'status.readyReplicas=1' should exist. logger.go:42: 14:52:14 | operator-self-healing/9-pod-failure | INFO Found 1 resource(s). logger.go:42: 14:52:14 | operator-self-healing/9-pod-failure | NAME NAMESPACE COL0 logger.go:42: 14:52:14 | operator-self-healing/9-pod-failure | percona-server-mysql-operator kuttl-test-modest-squid 1 logger.go:42: 14:52:14 | operator-self-healing/9-pod-failure | ASSERT PASS logger.go:42: 14:52:14 | operator-self-healing/9-pod-failure | test step completed 9-pod-failure logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | starting test step 10-scale-up logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | running command: [sh -c set -o errexit set -o xtrace source ../../functions get_cr \ | yq eval '.spec.mysql.clusterType="async"' - \ | yq eval '.spec.mysql.size=3' - \ | yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - \ | yq eval '.spec.proxy.haproxy.enabled=true' - \ | yq eval '.spec.proxy.haproxy.size=5' - \ | yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - \ | yq eval '.spec.orchestrator.enabled=true' - \ | yq eval '.spec.orchestrator.size=3' - \ | yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - \ | kubectl -n "${NAMESPACE}" apply -f -] logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + source ../../functions logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ realpath ../../.. 
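The 10-scale-up command above rebuilds the custom resource with get_cr: deploy/cr.yaml is read once, then piped through one yq eval stage per field override before landing in kubectl apply. Every stage of that pipeline starts concurrently under set -o xtrace, which is why the '+ yq eval' trace lines below print in nondeterministic order — the data still flows through the stages strictly in sequence. A condensed sketch of the same rewrite, collapsing the per-field stages into a single yq expression (assuming yq v4 syntax, which matches the traces):

    # One-shot equivalent of the step's get_cr | yq ... | kubectl apply chain.
    yq eval '
        .metadata.name = "operator-self-healing" |
        .spec.mysql.clusterType = "async" |
        .spec.mysql.size = 3 |
        .spec.mysql.affinity.antiAffinityTopologyKey = "none" |
        .spec.proxy.haproxy.enabled = true |
        .spec.proxy.haproxy.size = 5 |
        .spec.orchestrator.enabled = true |
        .spec.orchestrator.size = 3
      ' deploy/cr.yaml \
      | kubectl -n "${NAMESPACE}" apply -f -
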
logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | ++++ pwd logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/tests/operator-self-healing logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | ++ test_name=operator-self-healing logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/vars.sh logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ export GIT_BRANCH=PR-523 logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ GIT_BRANCH=PR-523 logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ export VERSION=PR-523-f00253e logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ VERSION=PR-523-f00253e logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ export 
IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | ++++ which gdate logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-523/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | ++++ which date logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ date=/usr/bin/date logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ command -v oc logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ kubectl get nodes logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | +++ grep '^minikube' logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + get_cr logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + local name_suffix= logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval .spec.mysql.size=3 - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval '.spec.mysql.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | ++ printf '.metadata.name="%s"' operator-self-healing logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval '.metadata.name="operator-self-healing"' /mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy/cr.yaml logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval .spec.proxy.haproxy.size=5 - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval .spec.proxy.haproxy.enabled=true - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval .spec.orchestrator.enabled=true - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + kubectl -n kuttl-test-modest-squid apply -f - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval '.spec.secretsName="test-secrets"' - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | ++ printf '.spec.initImage="%s"' perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:52:14 | 
operator-self-healing/10-scale-up | + yq eval '.spec.upgradeOptions.apply="disabled"' - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval '.spec.proxy.haproxy.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval '.spec.initImage="perconalab/percona-server-mysql-operator:PR-523-f00253e"' - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | ++ printf '.spec.proxy.haproxy.image="%s"' perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval '.spec.mysql.clusterType="async"' - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval '.spec.proxy.haproxy.image="perconalab/percona-server-mysql-operator:main-haproxy"' - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | ++ printf '.spec.proxy.router.image="%s"' perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | ++ printf '.spec.toolkit.image="%s"' perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | ++ printf '.spec.mysql.image="%s"' perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval '.spec.mysql.image="perconalab/percona-server-mysql-operator:main-psmysql"' - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval '.spec.proxy.router.image="perconalab/percona-server-mysql-operator:main-router"' - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval '.spec.toolkit.image="perconalab/percona-server-mysql-operator:main-toolkit"' - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval .spec.orchestrator.size=3 - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval '.spec.orchestrator.affinity.antiAffinityTopologyKey="none"' - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | ++ printf '.spec.orchestrator.image="%s"' perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval '.spec.sslSecretName="test-ssl"' - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | ++ printf '.spec.backup.image="%s"' perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval '.spec.orchestrator.image="perconalab/percona-server-mysql-operator:main-orchestrator"' - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval '.spec.backup.image="perconalab/percona-server-mysql-operator:main-backup"' - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | ++ printf '.spec.pmm.image="%s"' perconalab/pmm-client:dev-latest logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval '.spec.pmm.image="perconalab/pmm-client:dev-latest"' - logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + '[' -n '' ']' logger.go:42: 14:52:14 | operator-self-healing/10-scale-up | + yq eval - logger.go:42: 14:52:15 | operator-self-healing/10-scale-up | perconaservermysql.ps.percona.com/operator-self-healing configured logger.go:42: 14:52:36 | operator-self-healing/10-scale-up | test step completed 10-scale-up logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | starting test step 11-destroy-chaos-mesh logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | running command: [sh -c set -o errexit set -o xtrace source ../../functions 
destroy_chaos_mesh] logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | + source ../../functions logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ realpath ../../.. logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | ++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | ++++ pwd logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ basename /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/tests/operator-self-healing logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | ++ test_name=operator-self-healing logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | ++ source /mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/vars.sh logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ export ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ ROOT_REPO=/mnt/jenkins/workspace/cloud-ps-operator_PR-523 logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ export DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ DEPLOY_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/deploy logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ export TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ TESTS_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ export TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ TESTS_CONFIG_DIR=/mnt/jenkins/workspace/cloud-ps-operator_PR-523/e2e-tests/conf logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ export TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ TEMP_DIR=/tmp/kuttl/ps/operator-self-healing logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | ++++ git rev-parse --abbrev-ref HEAD logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ export GIT_BRANCH=PR-523 logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ GIT_BRANCH=PR-523 logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ export VERSION=PR-523-f00253e logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ VERSION=PR-523-f00253e logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE=perconalab/percona-server-mysql-operator:PR-523-f00253e logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_MYSQL=perconalab/percona-server-mysql-operator:main-psmysql logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh 
| +++ IMAGE_BACKUP=perconalab/percona-server-mysql-operator:main-backup logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_ORCHESTRATOR=perconalab/percona-server-mysql-operator:main-orchestrator logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_ROUTER=perconalab/percona-server-mysql-operator:main-router logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_TOOLKIT=perconalab/percona-server-mysql-operator:main-toolkit logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_HAPROXY=perconalab/percona-server-mysql-operator:main-haproxy logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ export PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ PMM_SERVER_VERSION=9.9.9 logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_PMM_CLIENT=perconalab/pmm-client:dev-latest logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ export IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ IMAGE_PMM_SERVER=perconalab/pmm-server:dev-latest logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | ++++ which gdate logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | which: no gdate in (/mnt/jenkins/workspace/cloud-ps-operator_PR-523/bin/:/home/ec2-user/.krew/bin:/usr/local/bin:/usr/bin) logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | ++++ which date logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ date=/usr/bin/date logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ command -v oc logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ kubectl get nodes logger.go:42: 14:52:36 | operator-self-healing/11-destroy-chaos-mesh | +++ grep '^minikube' logger.go:42: 14:52:37 | operator-self-healing/11-destroy-chaos-mesh | + destroy_chaos_mesh logger.go:42: 14:52:37 | operator-self-healing/11-destroy-chaos-mesh | ++ helm list --all-namespaces --filter chaos-mesh logger.go:42: 14:52:37 | operator-self-healing/11-destroy-chaos-mesh | ++ tail -n1 logger.go:42: 14:52:37 | operator-self-healing/11-destroy-chaos-mesh | ++ sed s/NAMESPACE// logger.go:42: 14:52:37 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '-F ' '{print $2}' logger.go:42: 14:52:37 | operator-self-healing/11-destroy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-523/kubeconfig logger.go:42: 14:52:37 | operator-self-healing/11-destroy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. 
This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-523/kubeconfig logger.go:42: 14:52:38 | operator-self-healing/11-destroy-chaos-mesh | + local chaos_mesh_ns=kuttl-test-modest-squid logger.go:42: 14:52:38 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl api-resources logger.go:42: 14:52:38 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 14:52:38 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 14:52:39 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:39 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete awschaos --all --all-namespaces logger.go:42: 14:52:39 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 14:52:39 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:39 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete azurechaos --all --all-namespaces logger.go:42: 14:52:39 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 14:52:39 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:39 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete blockchaos --all --all-namespaces logger.go:42: 14:52:40 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 14:52:40 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:40 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete dnschaos --all --all-namespaces logger.go:42: 14:52:40 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 14:52:40 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:40 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete gcpchaos --all --all-namespaces logger.go:42: 14:52:41 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 14:52:41 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:41 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete httpchaos --all --all-namespaces logger.go:42: 14:52:41 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 14:52:41 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:41 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete iochaos --all --all-namespaces logger.go:42: 14:52:41 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 14:52:41 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:41 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete jvmchaos --all --all-namespaces logger.go:42: 14:52:42 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 14:52:42 | operator-self-healing/11-destroy-chaos-mesh | + for i in 
'$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:42 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete kernelchaos --all --all-namespaces logger.go:42: 14:52:42 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 14:52:42 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:42 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete networkchaos --all --all-namespaces logger.go:42: 14:52:43 | operator-self-healing/11-destroy-chaos-mesh | networkchaos.chaos-mesh.org "chaos-pod-network-loss-operator" deleted logger.go:42: 14:52:43 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:43 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete physicalmachinechaos --all --all-namespaces logger.go:42: 14:52:43 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 14:52:43 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:43 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete physicalmachines --all --all-namespaces logger.go:42: 14:52:44 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 14:52:44 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:44 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete podchaos --all --all-namespaces logger.go:42: 14:52:44 | operator-self-healing/11-destroy-chaos-mesh | podchaos.chaos-mesh.org "chaos-pod-failure-operator" deleted logger.go:42: 14:52:44 | operator-self-healing/11-destroy-chaos-mesh | podchaos.chaos-mesh.org "chaos-pod-kill-operator" deleted logger.go:42: 14:52:44 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:44 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete podhttpchaos --all --all-namespaces logger.go:42: 14:52:45 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 14:52:45 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:45 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete podiochaos --all --all-namespaces logger.go:42: 14:52:45 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 14:52:45 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:45 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete podnetworkchaos --all --all-namespaces logger.go:42: 14:52:46 | operator-self-healing/11-destroy-chaos-mesh | podnetworkchaos.chaos-mesh.org "percona-server-mysql-operator-b4c599bbb-5rhlj" deleted logger.go:42: 14:52:46 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:46 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete remoteclusters --all --all-namespaces logger.go:42: 14:52:46 | 
operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 14:52:46 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:46 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete schedules --all --all-namespaces logger.go:42: 14:52:46 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 14:52:46 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:46 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete statuschecks --all --all-namespaces logger.go:42: 14:52:47 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 14:52:47 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:47 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete stresschaos --all --all-namespaces logger.go:42: 14:52:47 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 14:52:47 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:47 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete timechaos --all --all-namespaces logger.go:42: 14:52:48 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 14:52:48 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:48 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete workflownodes --all --all-namespaces logger.go:42: 14:52:48 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 14:52:48 | operator-self-healing/11-destroy-chaos-mesh | + for i in '$(kubectl api-resources | grep chaos-mesh | awk '\''{print $1}'\'')' logger.go:42: 14:52:48 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete workflows --all --all-namespaces logger.go:42: 14:52:48 | operator-self-healing/11-destroy-chaos-mesh | No resources found logger.go:42: 14:52:48 | operator-self-healing/11-destroy-chaos-mesh | + '[' -n kuttl-test-modest-squid ']' logger.go:42: 14:52:48 | operator-self-healing/11-destroy-chaos-mesh | + helm uninstall chaos-mesh --namespace kuttl-test-modest-squid logger.go:42: 14:52:48 | operator-self-healing/11-destroy-chaos-mesh | WARNING: Kubernetes configuration file is group-readable. This is insecure. Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-523/kubeconfig logger.go:42: 14:52:48 | operator-self-healing/11-destroy-chaos-mesh | WARNING: Kubernetes configuration file is world-readable. This is insecure. 
Location: /mnt/jenkins/workspace/cloud-ps-operator_PR-523/kubeconfig logger.go:42: 14:52:53 | operator-self-healing/11-destroy-chaos-mesh | release "chaos-mesh" uninstalled logger.go:42: 14:52:53 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get crd logger.go:42: 14:52:53 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh.org logger.go:42: 14:52:53 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 14:52:54 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete crd awschaos.chaos-mesh.org azurechaos.chaos-mesh.org blockchaos.chaos-mesh.org dnschaos.chaos-mesh.org gcpchaos.chaos-mesh.org httpchaos.chaos-mesh.org iochaos.chaos-mesh.org jvmchaos.chaos-mesh.org kernelchaos.chaos-mesh.org networkchaos.chaos-mesh.org physicalmachinechaos.chaos-mesh.org physicalmachines.chaos-mesh.org podchaos.chaos-mesh.org podhttpchaos.chaos-mesh.org podiochaos.chaos-mesh.org podnetworkchaos.chaos-mesh.org remoteclusters.chaos-mesh.org schedules.chaos-mesh.org statuschecks.chaos-mesh.org stresschaos.chaos-mesh.org timechaos.chaos-mesh.org workflownodes.chaos-mesh.org workflows.chaos-mesh.org logger.go:42: 14:52:54 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "awschaos.chaos-mesh.org" deleted logger.go:42: 14:52:54 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "azurechaos.chaos-mesh.org" deleted logger.go:42: 14:52:55 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "blockchaos.chaos-mesh.org" deleted logger.go:42: 14:52:55 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "dnschaos.chaos-mesh.org" deleted logger.go:42: 14:52:55 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "gcpchaos.chaos-mesh.org" deleted logger.go:42: 14:52:55 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "httpchaos.chaos-mesh.org" deleted logger.go:42: 14:52:55 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "iochaos.chaos-mesh.org" deleted logger.go:42: 14:52:55 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "jvmchaos.chaos-mesh.org" deleted logger.go:42: 14:52:56 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "kernelchaos.chaos-mesh.org" deleted logger.go:42: 14:52:57 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "networkchaos.chaos-mesh.org" deleted logger.go:42: 14:52:57 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "physicalmachinechaos.chaos-mesh.org" deleted logger.go:42: 14:52:58 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "physicalmachines.chaos-mesh.org" deleted logger.go:42: 14:52:58 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podchaos.chaos-mesh.org" deleted logger.go:42: 14:52:59 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podhttpchaos.chaos-mesh.org" deleted logger.go:42: 14:52:59 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "podiochaos.chaos-mesh.org" deleted logger.go:42: 14:52:59 | operator-self-healing/11-destroy-chaos-mesh | 
customresourcedefinition.apiextensions.k8s.io "podnetworkchaos.chaos-mesh.org" deleted logger.go:42: 14:52:59 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "remoteclusters.chaos-mesh.org" deleted logger.go:42: 14:53:01 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "schedules.chaos-mesh.org" deleted logger.go:42: 14:53:01 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "statuschecks.chaos-mesh.org" deleted logger.go:42: 14:53:01 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "stresschaos.chaos-mesh.org" deleted logger.go:42: 14:53:02 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "timechaos.chaos-mesh.org" deleted logger.go:42: 14:53:04 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "workflownodes.chaos-mesh.org" deleted logger.go:42: 14:53:05 | operator-self-healing/11-destroy-chaos-mesh | customresourcedefinition.apiextensions.k8s.io "workflows.chaos-mesh.org" deleted logger.go:42: 14:53:10 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get clusterrolebinding logger.go:42: 14:53:10 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 14:53:10 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 14:53:10 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete clusterrolebinding logger.go:42: 14:53:10 | operator-self-healing/11-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 14:53:10 | operator-self-healing/11-destroy-chaos-mesh | + : logger.go:42: 14:53:10 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get clusterrole logger.go:42: 14:53:10 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 14:53:10 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 14:53:11 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete clusterrole logger.go:42: 14:53:11 | operator-self-healing/11-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 14:53:11 | operator-self-healing/11-destroy-chaos-mesh | + : logger.go:42: 14:53:11 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get MutatingWebhookConfiguration logger.go:42: 14:53:11 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 14:53:11 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 14:53:11 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete MutatingWebhookConfiguration logger.go:42: 14:53:11 | operator-self-healing/11-destroy-chaos-mesh | error: resource(s) were provided, but no name was specified logger.go:42: 14:53:11 | operator-self-healing/11-destroy-chaos-mesh | + : logger.go:42: 14:53:11 | operator-self-healing/11-destroy-chaos-mesh | ++ kubectl get ValidatingWebhookConfiguration logger.go:42: 14:53:11 | operator-self-healing/11-destroy-chaos-mesh | ++ grep chaos-mesh logger.go:42: 14:53:11 | operator-self-healing/11-destroy-chaos-mesh | ++ awk '{print $1}' logger.go:42: 14:53:12 | operator-self-healing/11-destroy-chaos-mesh | + timeout 30 kubectl delete ValidatingWebhookConfiguration logger.go:42: 14:53:12 | operator-self-healing/11-destroy-chaos-mesh | error: resource(s) were provided, but no name was 
logger.go:42: 14:53:13 | operator-self-healing/99-drop-finalizer | starting test step 99-drop-finalizer
logger.go:42: 14:53:15 | operator-self-healing/99-drop-finalizer | PerconaServerMySQL:kuttl-test-modest-squid/operator-self-healing updated
logger.go:42: 14:53:15 | operator-self-healing/99-drop-finalizer | test step completed 99-drop-finalizer
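Note: step 99 clears the finalizers on the PerconaServerMySQL custom resource so kuttl can delete the test namespace without waiting on the operator's delete hooks. The log only records that the resource was "updated"; a patch of roughly the following shape would produce that line (the patch body is an assumption, not shown in this excerpt):

    # Assumed shape of the finalizer drop; emptying metadata.finalizers
    # lets Kubernetes garbage-collect the CR immediately on deletion.
    kubectl -n kuttl-test-modest-squid patch perconaservermysql \
        operator-self-healing --type=merge -p '{"metadata":{"finalizers":[]}}'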
logger.go:42: 14:53:15 | operator-self-healing | operator-self-healing events from ns kuttl-test-modest-squid:
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:43:49 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-m9blw Scheduled Successfully assigned kuttl-test-modest-squid/percona-server-mysql-operator-b4c599bbb-m9blw to gke-jen-ps-523-f00253e-4-default-pool-e964acce-7189 default-scheduler
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:43:49 +0000 UTC Normal ReplicaSet.apps percona-server-mysql-operator-b4c599bbb SuccessfulCreate Created pod: percona-server-mysql-operator-b4c599bbb-m9blw replicaset-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:43:49 +0000 UTC Normal Deployment.apps percona-server-mysql-operator ScalingReplicaSet Scaled up replica set percona-server-mysql-operator-b4c599bbb to 1 deployment-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:43:51 +0000 UTC Normal Lease.coordination.k8s.io 08db2feb.percona.com LeaderElection percona-server-mysql-operator-b4c599bbb-m9blw_0f65fdec-764c-4ba4-92d0-6e41a40fe6d8 became leader percona-server-mysql-operator-b4c599bbb-m9blw_0f65fdec-764c-4ba4-92d0-6e41a40fe6d8
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:43:51 +0000 UTC Normal Pod mysql-client Scheduled Successfully assigned kuttl-test-modest-squid/mysql-client to gke-jen-ps-523-f00253e-4-default-pool-e964acce-7189 default-scheduler
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:43:51 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-m9blw.spec.containers{manager} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:43:51 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-m9blw.spec.containers{manager} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 92.490919ms (92.499974ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:43:51 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-m9blw.spec.containers{manager} Created Created container manager kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:43:51 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-m9blw.spec.containers{manager} Started Started container manager kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:43:52 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Pulled Container image "percona/percona-server:8.0.33" already present on machine kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:43:52 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Created Created container mysql-client kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:43:52 +0000 UTC Normal Pod mysql-client.spec.containers{mysql-client} Started Started container mysql-client kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:32 +0000 UTC Normal Pod chaos-daemon-7wqhx Scheduled Successfully assigned kuttl-test-modest-squid/chaos-daemon-7wqhx to gke-jen-ps-523-f00253e-4-default-pool-e964acce-w8zz default-scheduler
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:32 +0000 UTC Normal Pod chaos-daemon-lvt9n Scheduled Successfully assigned kuttl-test-modest-squid/chaos-daemon-lvt9n to gke-jen-ps-523-f00253e-4-default-pool-e964acce-7189 default-scheduler
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:32 +0000 UTC Normal DaemonSet.apps chaos-daemon SuccessfulCreate Created pod: chaos-daemon-7wqhx daemonset-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:32 +0000 UTC Normal DaemonSet.apps chaos-daemon SuccessfulCreate Created pod: chaos-daemon-lvt9n daemonset-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:33 +0000 UTC Normal Pod chaos-controller-manager-d7b9476b-hsprn Scheduled Successfully assigned kuttl-test-modest-squid/chaos-controller-manager-d7b9476b-hsprn to gke-jen-ps-523-f00253e-4-default-pool-e964acce-7189 default-scheduler
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:33 +0000 UTC Normal Pod chaos-controller-manager-d7b9476b-qp7zg Scheduled Successfully assigned kuttl-test-modest-squid/chaos-controller-manager-d7b9476b-qp7zg to gke-jen-ps-523-f00253e-4-default-pool-e964acce-w8zz default-scheduler
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:33 +0000 UTC Normal Pod chaos-controller-manager-d7b9476b-qqgxh Scheduled Successfully assigned kuttl-test-modest-squid/chaos-controller-manager-d7b9476b-qqgxh to gke-jen-ps-523-f00253e-4-default-pool-e964acce-bq8g default-scheduler
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:33 +0000 UTC Normal ReplicaSet.apps chaos-controller-manager-d7b9476b SuccessfulCreate Created pod: chaos-controller-manager-d7b9476b-hsprn replicaset-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:33 +0000 UTC Normal ReplicaSet.apps chaos-controller-manager-d7b9476b SuccessfulCreate Created pod: chaos-controller-manager-d7b9476b-qqgxh replicaset-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:33 +0000 UTC Normal ReplicaSet.apps chaos-controller-manager-d7b9476b SuccessfulCreate Created pod: chaos-controller-manager-d7b9476b-qp7zg replicaset-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:33 +0000 UTC Normal Deployment.apps chaos-controller-manager ScalingReplicaSet Scaled up replica set chaos-controller-manager-d7b9476b to 3 deployment-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:33 +0000 UTC Normal Pod chaos-daemon-7wqhx.spec.containers{chaos-daemon} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:33 +0000 UTC Normal Pod chaos-daemon-lvt9n.spec.containers{chaos-daemon} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:33 +0000 UTC Normal Pod chaos-daemon-zc94l Scheduled Successfully assigned kuttl-test-modest-squid/chaos-daemon-zc94l to gke-jen-ps-523-f00253e-4-default-pool-e964acce-bq8g default-scheduler
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:33 +0000 UTC Normal Pod chaos-daemon-zc94l.spec.containers{chaos-daemon} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:33 +0000 UTC Normal DaemonSet.apps chaos-daemon SuccessfulCreate Created pod: chaos-daemon-zc94l daemonset-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:34 +0000 UTC Normal Pod chaos-controller-manager-d7b9476b-hsprn.spec.containers{chaos-mesh} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:34 +0000 UTC Normal Pod chaos-controller-manager-d7b9476b-qp7zg.spec.containers{chaos-mesh} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:34 +0000 UTC Normal Pod chaos-controller-manager-d7b9476b-qqgxh.spec.containers{chaos-mesh} Pulling Pulling image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:43 +0000 UTC Normal Pod chaos-daemon-7wqhx.spec.containers{chaos-daemon} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" in 10.10652466s (10.106616384s including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:43 +0000 UTC Normal Pod chaos-daemon-7wqhx.spec.containers{chaos-daemon} Created Created container chaos-daemon kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:43 +0000 UTC Normal Pod chaos-daemon-7wqhx.spec.containers{chaos-daemon} Started Started container chaos-daemon kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:43 +0000 UTC Normal Pod chaos-daemon-lvt9n.spec.containers{chaos-daemon} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" in 9.989694419s (9.989791477s including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:43 +0000 UTC Normal Pod chaos-daemon-lvt9n.spec.containers{chaos-daemon} Created Created container chaos-daemon kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:43 +0000 UTC Normal Pod chaos-daemon-lvt9n.spec.containers{chaos-daemon} Started Started container chaos-daemon kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:43 +0000 UTC Normal Pod chaos-daemon-zc94l.spec.containers{chaos-daemon} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-daemon:v2.5.1" in 9.850324632s (9.850608497s including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:43 +0000 UTC Normal Pod chaos-daemon-zc94l.spec.containers{chaos-daemon} Created Created container chaos-daemon kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:43 +0000 UTC Normal Pod chaos-daemon-zc94l.spec.containers{chaos-daemon} Started Started container chaos-daemon kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:45 +0000 UTC Normal Pod chaos-controller-manager-d7b9476b-hsprn.spec.containers{chaos-mesh} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" in 1.891218429s (10.836243785s including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:45 +0000 UTC Normal Pod chaos-controller-manager-d7b9476b-hsprn.spec.containers{chaos-mesh} Created Created container chaos-mesh kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:45 +0000 UTC Normal Pod chaos-controller-manager-d7b9476b-hsprn.spec.containers{chaos-mesh} Started Started container chaos-mesh kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:45 +0000 UTC Normal Pod chaos-controller-manager-d7b9476b-qp7zg.spec.containers{chaos-mesh} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" in 1.932710326s (10.848035885s including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:45 +0000 UTC Normal Pod chaos-controller-manager-d7b9476b-qp7zg.spec.containers{chaos-mesh} Created Created container chaos-mesh kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:45 +0000 UTC Normal Pod chaos-controller-manager-d7b9476b-qp7zg.spec.containers{chaos-mesh} Started Started container chaos-mesh kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:45 +0000 UTC Normal Pod chaos-controller-manager-d7b9476b-qqgxh.spec.containers{chaos-mesh} Pulled Successfully pulled image "ghcr.io/chaos-mesh/chaos-mesh:v2.5.1" in 1.946012999s (11.050444158s including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:45 +0000 UTC Normal Pod chaos-controller-manager-d7b9476b-qqgxh.spec.containers{chaos-mesh} Created Created container chaos-mesh kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:45 +0000 UTC Normal Pod chaos-controller-manager-d7b9476b-qqgxh.spec.containers{chaos-mesh} Started Started container chaos-mesh kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:45 +0000 UTC Normal ConfigMap chaos-mesh LeaderElection chaos-controller-manager-d7b9476b-hsprn_9459b8c1-eda5-47ad-98ca-ccbdbbad69e7 became leader
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:45 +0000 UTC Normal Lease.coordination.k8s.io chaos-mesh LeaderElection chaos-controller-manager-d7b9476b-hsprn_9459b8c1-eda5-47ad-98ca-ccbdbbad69e7 became leader
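Note: the events above show Chaos Mesh v2.5.1 coming up inside the test namespace: a three-replica chaos-controller-manager Deployment, a chaos-daemon DaemonSet with one pod per node, and a leader election on the chaos-mesh Lease. The deploy helper itself is not part of this excerpt; an install consistent with these events would look roughly like the following, where every flag is an assumption:

    # Assumed install matching the events (v2.5.1, three controller
    # replicas, one chaos-daemon per node); GKE nodes run containerd,
    # hence the runtime/socket settings.
    helm repo add chaos-mesh https://charts.chaos-mesh.org
    helm install chaos-mesh chaos-mesh/chaos-mesh \
        --namespace kuttl-test-modest-squid \
        --version 2.5.1 \
        --set chaosDaemon.runtime=containerd \
        --set chaosDaemon.socketPath=/run/containerd/containerd.sock \
        --set controllerManager.replicaCount=3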
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:53 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-0 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:53 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-0 Provisioning External provisioner is provisioning volume for claim "kuttl-test-modest-squid/datadir-operator-self-healing-mysql-0" pd.csi.storage.gke.io_gke-77d1c4fac69c4c2a896d-0b77-45e4-vm_8a268d8f-c2f3-4090-b361-6efbcaeaaace
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:53 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-0 ExternalProvisioning waiting for a volume to be created, either by external provisioner "pd.csi.storage.gke.io" or manually created by system administrator persistentvolume-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:53 +0000 UTC Normal StatefulSet.apps operator-self-healing-mysql SuccessfulCreate create Claim datadir-operator-self-healing-mysql-0 Pod operator-self-healing-mysql-0 in StatefulSet operator-self-healing-mysql success statefulset-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:53 +0000 UTC Normal StatefulSet.apps operator-self-healing-mysql SuccessfulCreate create Pod operator-self-healing-mysql-0 in StatefulSet operator-self-healing-mysql successful statefulset-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:53 +0000 UTC Normal StatefulSet.apps operator-self-healing-orc SuccessfulCreate create Pod operator-self-healing-orc-0 in StatefulSet operator-self-healing-orc successful statefulset-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:54 +0000 UTC Normal Pod operator-self-healing-orc-0 Scheduled Successfully assigned kuttl-test-modest-squid/operator-self-healing-orc-0 to gke-jen-ps-523-f00253e-4-default-pool-e964acce-7189 default-scheduler
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:54 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:54 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 90.703387ms (90.720324ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:54 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.initContainers{orc-init} Created Created container orc-init kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:54 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.initContainers{orc-init} Started Started container orc-init kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:57 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-0 ProvisioningSucceeded Successfully provisioned volume pvc-dc5a015c-0801-4fda-8aa8-047732f7a92d pd.csi.storage.gke.io_gke-77d1c4fac69c4c2a896d-0b77-45e4-vm_8a268d8f-c2f3-4090-b361-6efbcaeaaace
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:44:57 +0000 UTC Normal Pod operator-self-healing-mysql-0 Scheduled Successfully assigned kuttl-test-modest-squid/operator-self-healing-mysql-0 to gke-jen-ps-523-f00253e-4-default-pool-e964acce-bq8g default-scheduler
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:00 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:00 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 118.305729ms (118.313704ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:00 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{orc} Created Created container orc kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:00 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{orc} Started Started container orc kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:00 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:01 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 113.602439ms (113.61415ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:01 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{mysql-monit} Created Created container mysql-monit kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:01 +0000 UTC Normal Pod operator-self-healing-orc-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:09 +0000 UTC Normal Pod operator-self-healing-mysql-0 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-dc5a015c-0801-4fda-8aa8-047732f7a92d" attachdetach-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:12 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:12 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 890.767496ms (890.791689ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:12 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.initContainers{mysql-init} Created Created container mysql-init kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:13 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:15 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:15 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 83.643201ms (83.666579ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:15 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{mysql} Created Created container mysql kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:15 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:15 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:15 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 95.9854ms (96.004042ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:15 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{xtrabackup} Created Created container xtrabackup kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:15 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:15 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:15 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 79.304664ms (79.311888ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:15 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:15 +0000 UTC Normal Pod operator-self-healing-mysql-0.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
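Note: the events for operator-self-healing-mysql-0 spell out the pod's layout: a mysql-init init container, then the mysql server plus xtrabackup and pt-heartbeat sidecars. A quick, hedged way to confirm that layout against the live pod (names taken from the events above):

    # Print init containers and regular containers of the mysql pod;
    # per the events, expect "mysql-init" and then
    # "mysql xtrabackup pt-heartbeat".
    kubectl -n kuttl-test-modest-squid get pod operator-self-healing-mysql-0 \
        -o jsonpath='{.spec.initContainers[*].name} / {.spec.containers[*].name}'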
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:34 +0000 UTC Normal Pod operator-self-healing-orc-1 Scheduled Successfully assigned kuttl-test-modest-squid/operator-self-healing-orc-1 to gke-jen-ps-523-f00253e-4-default-pool-e964acce-7189 default-scheduler
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:34 +0000 UTC Normal StatefulSet.apps operator-self-healing-orc SuccessfulCreate create Pod operator-self-healing-orc-1 in StatefulSet operator-self-healing-orc successful statefulset-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:35 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.initContainers{orc-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:35 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 115.046873ms (115.055288ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:35 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.initContainers{orc-init} Created Created container orc-init kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:35 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.initContainers{orc-init} Started Started container orc-init kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:36 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:36 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 81.433084ms (81.440757ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:36 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{orc} Created Created container orc kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:36 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{orc} Started Started container orc kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:36 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:37 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 83.64574ms (83.670641ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:37 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{mysql-monit} Created Created container mysql-monit kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:37 +0000 UTC Normal Pod operator-self-healing-orc-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:45 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-1 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:45 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-1 ExternalProvisioning waiting for a volume to be created, either by external provisioner "pd.csi.storage.gke.io" or manually created by system administrator persistentvolume-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:45 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-1 Provisioning External provisioner is provisioning volume for claim "kuttl-test-modest-squid/datadir-operator-self-healing-mysql-1" pd.csi.storage.gke.io_gke-77d1c4fac69c4c2a896d-0b77-45e4-vm_8a268d8f-c2f3-4090-b361-6efbcaeaaace
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:45 +0000 UTC Normal StatefulSet.apps operator-self-healing-mysql SuccessfulCreate create Claim datadir-operator-self-healing-mysql-1 Pod operator-self-healing-mysql-1 in StatefulSet operator-self-healing-mysql success statefulset-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:45 +0000 UTC Normal StatefulSet.apps operator-self-healing-mysql SuccessfulCreate create Pod operator-self-healing-mysql-1 in StatefulSet operator-self-healing-mysql successful statefulset-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:49 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-1 ProvisioningSucceeded Successfully provisioned volume pvc-ba2c37b6-aae4-4624-b424-333abe601a5d pd.csi.storage.gke.io_gke-77d1c4fac69c4c2a896d-0b77-45e4-vm_8a268d8f-c2f3-4090-b361-6efbcaeaaace
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:49 +0000 UTC Normal Pod operator-self-healing-mysql-1 Scheduled Successfully assigned kuttl-test-modest-squid/operator-self-healing-mysql-1 to gke-jen-ps-523-f00253e-4-default-pool-e964acce-w8zz default-scheduler
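Note: each mysql replica's PVC goes through the same sequence: WaitForFirstConsumer, then Provisioning/ExternalProvisioning once the pod is scheduled, then ProvisioningSucceeded. With a WaitForFirstConsumer storage class the GKE PD CSI driver delays disk creation until the scheduler has picked a node, so the volume is provisioned in that pod's zone. The same sequence can be followed live with:

    # Watch PVC lifecycle events like the ones interleaved above
    # (WaitForFirstConsumer -> Provisioning -> ProvisioningSucceeded).
    kubectl -n kuttl-test-modest-squid get events --watch \
        --field-selector involvedObject.kind=PersistentVolumeClaim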
"perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:51 +0000 UTC Normal StatefulSet.apps operator-self-healing-haproxy SuccessfulCreate create Pod operator-self-healing-haproxy-0 in StatefulSet operator-self-healing-haproxy successful statefulset-controller logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:52 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 86.141409ms (86.160491ms including waiting) kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:52 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:52 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:53 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:53 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 106.959498ms (106.972976ms including waiting) kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:53 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:54 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:54 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:54 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 123.495445ms (123.509509ms including waiting) kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:54 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:54 +0000 UTC Normal Pod operator-self-healing-haproxy-0.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:54 +0000 UTC Normal Pod operator-self-healing-haproxy-1 Scheduled Successfully assigned kuttl-test-modest-squid/operator-self-healing-haproxy-1 to gke-jen-ps-523-f00253e-4-default-pool-e964acce-bq8g default-scheduler logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:54 +0000 UTC Normal StatefulSet.apps operator-self-healing-haproxy SuccessfulCreate create Pod operator-self-healing-haproxy-1 in StatefulSet operator-self-healing-haproxy successful statefulset-controller logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:55 +0000 UTC Normal Pod 
operator-self-healing-haproxy-1.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:55 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 127.29871ms (127.31215ms including waiting) kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:55 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:55 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:56 +0000 UTC Normal Pod operator-self-healing-mysql-1 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-ba2c37b6-aae4-4624-b424-333abe601a5d" attachdetach-controller logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:57 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:57 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 94.940531ms (94.948964ms including waiting) kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:57 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:57 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:57 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:57 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 87.593041ms (87.608266ms including waiting) kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:57 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:57 +0000 UTC Normal Pod operator-self-healing-haproxy-1.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:58 +0000 UTC Normal Pod operator-self-healing-haproxy-2 Scheduled Successfully assigned kuttl-test-modest-squid/operator-self-healing-haproxy-2 to gke-jen-ps-523-f00253e-4-default-pool-e964acce-w8zz default-scheduler logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:58 +0000 UTC Normal StatefulSet.apps operator-self-healing-haproxy SuccessfulCreate create Pod operator-self-healing-haproxy-2 in StatefulSet operator-self-healing-haproxy successful statefulset-controller logger.go:42: 14:53:15 | operator-self-healing | 
2024-02-05 14:45:59 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:59 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 114.513898ms (114.533854ms including waiting) kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:59 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:45:59 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:00 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:00 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 483.410135ms (483.420809ms including waiting) kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:00 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.initContainers{mysql-init} Created Created container mysql-init kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:00 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.initContainers{mysql-init} Started Started container mysql-init kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:01 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:01 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 491.641459ms (491.660597ms including waiting) kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:01 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:01 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:01 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:02 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 94.705578ms (94.720792ms including waiting) kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:02 +0000 UTC Normal Pod operator-self-healing-haproxy-2.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:02 +0000 UTC 
Normal Pod operator-self-healing-haproxy-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:02 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:02 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 81.965906ms (81.973982ms including waiting) kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:02 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{mysql} Created Created container mysql kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:02 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{mysql} Started Started container mysql kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:02 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:02 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 87.556287ms (87.571062ms including waiting) kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:02 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{xtrabackup} Created Created container xtrabackup kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:02 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{xtrabackup} Started Started container xtrabackup kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:02 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:02 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 100.008538ms (100.015779ms including waiting) kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:02 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:02 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:09 +0000 UTC Normal StatefulSet.apps operator-self-healing-orc SuccessfulCreate create Pod operator-self-healing-orc-2 in StatefulSet operator-self-healing-orc successful statefulset-controller logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:10 +0000 UTC Normal Pod operator-self-healing-orc-2 Scheduled Successfully assigned kuttl-test-modest-squid/operator-self-healing-orc-2 to gke-jen-ps-523-f00253e-4-default-pool-e964acce-7189 default-scheduler logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:10 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.initContainers{orc-init} Pulling Pulling image 
"perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:10 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.initContainers{orc-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 79.074442ms (79.092175ms including waiting) kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:10 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.initContainers{orc-init} Created Created container orc-init kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:10 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.initContainers{orc-init} Started Started container orc-init kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:12 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{orc} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:12 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{orc} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 79.32653ms (79.334343ms including waiting) kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:13 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{orc} Created Created container orc kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:13 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{orc} Started Started container orc kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:13 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-orchestrator" kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:13 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-orchestrator" in 87.067378ms (87.077719ms including waiting) kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:13 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:13 +0000 UTC Normal Pod operator-self-healing-orc-2.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:21 +0000 UTC Warning Pod operator-self-healing-mysql-1.spec.containers{mysql} Unhealthy Startup probe failed: kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:21 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:24 +0000 UTC Normal Pod operator-self-healing-mysql-1.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 80.005926ms (80.024824ms including waiting) kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:54 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:54 +0000 UTC 
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:54 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-2 WaitForFirstConsumer waiting for first consumer to be created before binding persistentvolume-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:54 +0000 UTC Normal StatefulSet.apps operator-self-healing-mysql SuccessfulCreate create Claim datadir-operator-self-healing-mysql-2 Pod operator-self-healing-mysql-2 in StatefulSet operator-self-healing-mysql success statefulset-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:54 +0000 UTC Normal StatefulSet.apps operator-self-healing-mysql SuccessfulCreate create Pod operator-self-healing-mysql-2 in StatefulSet operator-self-healing-mysql successful statefulset-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:55 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-2 Provisioning External provisioner is provisioning volume for claim "kuttl-test-modest-squid/datadir-operator-self-healing-mysql-2" pd.csi.storage.gke.io_gke-77d1c4fac69c4c2a896d-0b77-45e4-vm_8a268d8f-c2f3-4090-b361-6efbcaeaaace
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:55 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-2 ExternalProvisioning waiting for a volume to be created, either by external provisioner "pd.csi.storage.gke.io" or manually created by system administrator persistentvolume-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:58 +0000 UTC Normal PersistentVolumeClaim datadir-operator-self-healing-mysql-2 ProvisioningSucceeded Successfully provisioned volume pvc-2ecc304c-5eeb-4ef0-9365-fd4f91f136a8 pd.csi.storage.gke.io_gke-77d1c4fac69c4c2a896d-0b77-45e4-vm_8a268d8f-c2f3-4090-b361-6efbcaeaaace
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:46:59 +0000 UTC Normal Pod operator-self-healing-mysql-2 Scheduled Successfully assigned kuttl-test-modest-squid/operator-self-healing-mysql-2 to gke-jen-ps-523-f00253e-4-default-pool-e964acce-7189 default-scheduler
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:47:09 +0000 UTC Normal Pod operator-self-healing-mysql-2 SuccessfulAttachVolume AttachVolume.Attach succeeded for volume "pvc-2ecc304c-5eeb-4ef0-9365-fd4f91f136a8" attachdetach-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:47:10 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.initContainers{mysql-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:47:10 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.initContainers{mysql-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 97.511881ms (97.524431ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:47:10 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.initContainers{mysql-init} Created Created container mysql-init kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:47:11 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.initContainers{mysql-init} Started Started container mysql-init kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:47:13 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{mysql} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-psmysql" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:47:13 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 85.354632ms (85.362731ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:47:13 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{mysql} Created Created container mysql kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:47:13 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{mysql} Started Started container mysql kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:47:13 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{xtrabackup} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-backup" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:47:13 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{xtrabackup} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-backup" in 100.551761ms (100.559795ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:47:13 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{xtrabackup} Created Created container xtrabackup kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:47:13 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{xtrabackup} Started Started container xtrabackup kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:47:13 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{pt-heartbeat} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-toolkit" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:47:13 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{pt-heartbeat} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-toolkit" in 154.872148ms (154.879893ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:47:13 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{pt-heartbeat} Created Created container pt-heartbeat kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:47:13 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{pt-heartbeat} Started Started container pt-heartbeat kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:47:31 +0000 UTC Warning Pod operator-self-healing-mysql-2.spec.containers{mysql} Unhealthy Startup probe failed: kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:47:31 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{mysql} Killing Container mysql failed startup probe, will be restarted kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:47:35 +0000 UTC Normal Pod operator-self-healing-mysql-2.spec.containers{mysql} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-psmysql" in 91.000814ms (91.019095ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:22 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-operator FinalizerInited Finalizer has been inited
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:22 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-operator Updated Successfully update finalizer of resource
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:22 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-operator Updated Successfully update desiredPhase of resource
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:22 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-operator Applied Successfully apply chaos for kuttl-test-modest-squid/percona-server-mysql-operator-b4c599bbb-m9blw
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:22 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-operator Updated Successfully update records of resource
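Note: at 14:48:22 a Chaos Mesh PodChaos named chaos-pod-kill-operator kills the operator pod. The manifest itself is not in this excerpt, so the selector and mode below are assumptions consistent with the events, which show the chaos applied to exactly one named pod:

    # Assumed shape of the PodChaos the events describe; it kills one
    # operator pod and lets the Deployment's ReplicaSet reschedule it.
    kubectl -n kuttl-test-modest-squid apply -f - <<EOF
    apiVersion: chaos-mesh.org/v1alpha1
    kind: PodChaos
    metadata:
      name: chaos-pod-kill-operator
    spec:
      action: pod-kill
      mode: one
      selector:
        pods:
          kuttl-test-modest-squid:
            - percona-server-mysql-operator-b4c599bbb-m9blw
    EOF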
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:22 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-5rhlj Scheduled Successfully assigned kuttl-test-modest-squid/percona-server-mysql-operator-b4c599bbb-5rhlj to gke-jen-ps-523-f00253e-4-default-pool-e964acce-bq8g default-scheduler
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:22 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-m9blw.spec.containers{manager} Killing Stopping container manager kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:22 +0000 UTC Normal ReplicaSet.apps percona-server-mysql-operator-b4c599bbb SuccessfulCreate Created pod: percona-server-mysql-operator-b4c599bbb-5rhlj replicaset-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:24 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-5rhlj.spec.containers{manager} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:24 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-5rhlj.spec.containers{manager} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 120.919008ms (120.937197ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:24 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-5rhlj.spec.containers{manager} Created Created container manager kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:24 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-5rhlj.spec.containers{manager} Started Started container manager kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:44 +0000 UTC Normal Lease.coordination.k8s.io 08db2feb.percona.com LeaderElection percona-server-mysql-operator-b4c599bbb-5rhlj_44faa263-129d-412b-9b6f-c7a42a107863 became leader percona-server-mysql-operator-b4c599bbb-5rhlj_44faa263-129d-412b-9b6f-c7a42a107863
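Note: the self-healing under test shows up here as the ReplicaSet replacing the killed pod within seconds and the new pod winning leader election on the 08db2feb.percona.com Lease about 20 seconds later. A hedged way to assert the same recovery from a script (the label selector is an assumption based on the deployment name, not shown in this log):

    # Wait for the replacement operator pod to become Ready, then read
    # which pod currently holds the leader lease named in the events.
    kubectl -n kuttl-test-modest-squid wait --for=condition=Ready pod \
        -l app.kubernetes.io/name=percona-server-mysql-operator --timeout=120s
    kubectl -n kuttl-test-modest-squid get lease 08db2feb.percona.com \
        -o jsonpath='{.spec.holderIdentity}'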
operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:56 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:57 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:57 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 75.905503ms (75.932173ms including waiting) kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:57 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Created Created container haproxy kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:58 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Started Started container haproxy kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:58 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:58 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 77.542969ms (77.556267ms including waiting) kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:58 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Created Created container mysql-monit kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:58 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Started Started container mysql-monit kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:59 +0000 UTC Normal Pod operator-self-healing-haproxy-4 Scheduled Successfully assigned kuttl-test-modest-squid/operator-self-healing-haproxy-4 to gke-jen-ps-523-f00253e-4-default-pool-e964acce-7189 default-scheduler logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:59 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:59 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 90.16174ms (90.420277ms including waiting) kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:59 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:48:59 +0000 UTC Normal StatefulSet.apps operator-self-healing-haproxy SuccessfulCreate create Pod operator-self-healing-haproxy-4 in StatefulSet operator-self-healing-haproxy successful statefulset-controller logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:49:00 +0000 UTC Normal Pod 
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:49:01 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:49:01 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 91.085164ms (91.094816ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:49:01 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Created Created container haproxy kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:49:01 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:49:01 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:49:01 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 74.603286ms (74.611758ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:49:01 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Created Created container mysql-monit kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:49:01 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:49:09 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator FinalizerInited Finalizer has been inited
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:49:09 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Updated Successfully update finalizer of resource
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:49:09 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Started Experiment has started
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:49:09 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Updated Successfully update desiredPhase of resource
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:49:09 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Updated Successfully update records of resource
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:49:09 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Applied Successfully apply chaos for kuttl-test-modest-squid/percona-server-mysql-operator-b4c599bbb-5rhlj
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:49:09 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Updated Successfully update records of resource
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:49:09 +0000 UTC Normal PodNetworkChaos.chaos-mesh.org percona-server-mysql-operator-b4c599bbb-5rhlj Updated Successfully update ObservedGeneration and FailedMessage of resource
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:49:13 +0000 UTC Warning Pod percona-server-mysql-operator-b4c599bbb-5rhlj.spec.containers{manager} Unhealthy Readiness probe failed: Get "http://10.208.41.26:8081/readyz": context deadline exceeded (Client.Timeout exceeded while awaiting headers) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:49:21 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-5rhlj.spec.containers{manager} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 93.335299ms (93.357863ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:49:43 +0000 UTC Warning Pod percona-server-mysql-operator-b4c599bbb-5rhlj.spec.containers{manager} Unhealthy Liveness probe failed: Get "http://10.208.41.26:8081/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:50:09 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator TimeUp Time up according to the duration
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:50:09 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Updated Successfully update desiredPhase of resource
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:50:09 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Updated Successfully update records of resource
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:50:09 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Recovered Successfully recover chaos for kuttl-test-modest-squid/percona-server-mysql-operator-b4c599bbb-5rhlj
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:50:09 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Updated Successfully update records of resource
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:50:09 +0000 UTC Normal PodNetworkChaos.chaos-mesh.org percona-server-mysql-operator-b4c599bbb-5rhlj Updated Successfully update ObservedGeneration and FailedMessage of resource
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:50:22 +0000 UTC Warning Pod percona-server-mysql-operator-b4c599bbb-5rhlj.spec.containers{manager} BackOff Back-off restarting failed container manager in pod percona-server-mysql-operator-b4c599bbb-5rhlj_kuttl-test-modest-squid(6a66cd0c-8420-40df-81ec-25a1ba5a36cf) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:50:34 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-5rhlj.spec.containers{manager} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 120.091925ms (120.118332ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:50:52 +0000 UTC Normal Lease.coordination.k8s.io 08db2feb.percona.com LeaderElection percona-server-mysql-operator-b4c599bbb-5rhlj_66089f33-a3d8-45c8-b8b8-62d91677ea63 became leader percona-server-mysql-operator-b4c599bbb-5rhlj_66089f33-a3d8-45c8-b8b8-62d91677ea63
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:50:57 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:50:57 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
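The chaos-pod-network-loss-operator experiment above ran from 14:49:09 until its TimeUp at 14:50:09, i.e. a 60s duration, and the failed readiness and liveness probes inside that window are the expected symptom of packet loss on the operator pod. A sketch of the corresponding Chaos Mesh NetworkChaos, assuming total (100%) loss and targeting the pod by name; the real manifest again comes from the test helpers:

# Assumed loss percentage and selector; duration inferred from the
# Started/TimeUp timestamps in the events above.
kubectl apply -f - <<EOF
apiVersion: chaos-mesh.org/v1alpha1
kind: NetworkChaos
metadata:
  name: chaos-pod-network-loss-operator
  namespace: kuttl-test-modest-squid
spec:
  action: loss
  mode: one
  duration: "60s"
  loss:
    loss: "100"
    correlation: "100"
  selector:
    pods:
      kuttl-test-modest-squid:
        - percona-server-mysql-operator-b4c599bbb-5rhlj
EOF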
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:50:57 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Killing Stopping container haproxy kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:50:57 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Killing Stopping container mysql-monit kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:50:57 +0000 UTC Normal StatefulSet.apps operator-self-healing-haproxy SuccessfulDelete delete Pod operator-self-healing-haproxy-4 in StatefulSet operator-self-healing-haproxy successful statefulset-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:50:57 +0000 UTC Normal StatefulSet.apps operator-self-healing-haproxy SuccessfulDelete delete Pod operator-self-healing-haproxy-3 in StatefulSet operator-self-healing-haproxy successful statefulset-controller
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:51:02 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator FinalizerInited Finalizer has been inited
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:51:02 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator Updated Successfully update finalizer of resource
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:51:02 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator Started Experiment has started
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:51:02 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator Updated Successfully update desiredPhase of resource
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:51:02 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator Applied Successfully apply chaos for kuttl-test-modest-squid/percona-server-mysql-operator-b4c599bbb-5rhlj
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:51:02 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-5rhlj.spec.containers{manager} Killing Container manager definition changed, will be restarted kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:51:03 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator Updated Successfully update records of resource
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:51:03 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-5rhlj.spec.containers{manager} Pulling Pulling image "gcr.io/google-containers/pause:latest" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:51:04 +0000 UTC Normal Pod percona-server-mysql-operator-b4c599bbb-5rhlj.spec.containers{manager} Pulled Successfully pulled image "gcr.io/google-containers/pause:latest" in 320.768ms (320.795289ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:02 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator TimeUp Time up according to the duration
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:02 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator Updated Successfully update desiredPhase of resource
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:02 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator Recovered Successfully recover chaos for kuttl-test-modest-squid/percona-server-mysql-operator-b4c599bbb-5rhlj
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:02 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator Updated Successfully update records of resource
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:18 +0000 UTC Normal Lease.coordination.k8s.io 08db2feb.percona.com LeaderElection percona-server-mysql-operator-b4c599bbb-5rhlj_df22f4c5-8b43-4dbb-a502-45317b891c80 became leader percona-server-mysql-operator-b4c599bbb-5rhlj_df22f4c5-8b43-4dbb-a502-45317b891c80
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:24 +0000 UTC Normal Pod operator-self-healing-haproxy-3 Scheduled Successfully assigned kuttl-test-modest-squid/operator-self-healing-haproxy-3 to gke-jen-ps-523-f00253e-4-default-pool-e964acce-w8zz default-scheduler
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:25 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:25 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 111.674973ms (111.683318ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:25 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:25 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:26 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:26 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 96.219725ms (96.262906ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:26 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Created Created container haproxy kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:26 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:26 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:26 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 88.582786ms (88.60303ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:26 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Created Created container mysql-monit kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:26 +0000 UTC Normal Pod operator-self-healing-haproxy-3.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
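The chaos-pod-failure-operator experiment (14:51:02 to 14:52:02, again a 60s window) does not delete the pod: Chaos Mesh swaps the manager container's image for gcr.io/google-containers/pause:latest, which is exactly the "definition changed, will be restarted" and pause-image pull sequence in the events above. A sketch of such a PodChaos, under the same assumptions as the earlier manifests:

# Assumed selector; duration inferred from the Started/TimeUp timestamps.
kubectl apply -f - <<EOF
apiVersion: chaos-mesh.org/v1alpha1
kind: PodChaos
metadata:
  name: chaos-pod-failure-operator
  namespace: kuttl-test-modest-squid
spec:
  action: pod-failure
  mode: one
  duration: "60s"
  selector:
    pods:
      kuttl-test-modest-squid:
        - percona-server-mysql-operator-b4c599bbb-5rhlj
EOF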
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:27 +0000 UTC Normal Pod operator-self-healing-haproxy-4 Scheduled Successfully assigned kuttl-test-modest-squid/operator-self-healing-haproxy-4 to gke-jen-ps-523-f00253e-4-default-pool-e964acce-7189 default-scheduler
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:28 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Pulling Pulling image "perconalab/percona-server-mysql-operator:PR-523-f00253e" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:28 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:PR-523-f00253e" in 107.100155ms (107.108718ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:28 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Created Created container haproxy-init kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:28 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.initContainers{haproxy-init} Started Started container haproxy-init kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:30 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:30 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 81.306639ms (81.320081ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:30 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Created Created container haproxy kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:30 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{haproxy} Started Started container haproxy kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:30 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Pulling Pulling image "perconalab/percona-server-mysql-operator:main-haproxy" kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:30 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Pulled Successfully pulled image "perconalab/percona-server-mysql-operator:main-haproxy" in 82.90522ms (82.919447ms including waiting) kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:30 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Created Created container mysql-monit kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:30 +0000 UTC Normal Pod operator-self-healing-haproxy-4.spec.containers{mysql-monit} Started Started container mysql-monit kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:43 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator FinalizerInited Finalizer has been removed
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:43 +0000 UTC Normal NetworkChaos.chaos-mesh.org chaos-pod-network-loss-operator Updated Successfully update finalizer of resource
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:44 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator FinalizerInited Finalizer has been removed
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:44 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-failure-operator Updated Successfully update finalizer of resource
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:44 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-operator Deleted Experiment has been deleted
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:44 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-operator Updated Successfully update desiredPhase of resource
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:44 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-operator Recovered Successfully recover chaos for kuttl-test-modest-squid/percona-server-mysql-operator-b4c599bbb-m9blw
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:44 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-operator Updated Successfully update records of resource
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:44 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-operator FinalizerInited Finalizer has been removed
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:44 +0000 UTC Normal PodChaos.chaos-mesh.org chaos-pod-kill-operator Updated Successfully update finalizer of resource
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:52 +0000 UTC Normal Pod chaos-controller-manager-d7b9476b-hsprn.spec.containers{chaos-mesh} Killing Stopping container chaos-mesh kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:52 +0000 UTC Normal Pod chaos-controller-manager-d7b9476b-qp7zg.spec.containers{chaos-mesh} Killing Stopping container chaos-mesh kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:52 +0000 UTC Normal Pod chaos-controller-manager-d7b9476b-qqgxh.spec.containers{chaos-mesh} Killing Stopping container chaos-mesh kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:52 +0000 UTC Normal Pod chaos-daemon-7wqhx.spec.containers{chaos-daemon} Killing Stopping container chaos-daemon kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:52 +0000 UTC Normal Pod chaos-daemon-lvt9n.spec.containers{chaos-daemon} Killing Stopping container chaos-daemon kubelet
logger.go:42: 14:53:15 | operator-self-healing | 2024-02-05 14:52:52 +0000 UTC Normal Pod chaos-daemon-zc94l.spec.containers{chaos-daemon} Killing Stopping container chaos-daemon kubelet
logger.go:42: 14:53:15 | operator-self-healing | Deleting namespace: kuttl-test-modest-squid
=== CONT kuttl
harness.go:405: run tests finished
harness.go:513: cleaning up
harness.go:570: removing temp folder: ""
--- PASS: kuttl (613.50s)
--- PASS: kuttl/harness (0.00s)
--- PASS: kuttl/harness/operator-self-healing (610.73s)
PASS
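All three experiments recovered, Chaos Mesh and the test namespace were torn down, and the harness reported PASS. To follow the same event stream interactively while such a test is still running, or to confirm the operator deployment healed after each experiment, commands along these lines work (illustrative only, not part of the harness):

# Stream the namespace events the harness dumps at the end of the test.
kubectl -n kuttl-test-modest-squid get events --sort-by=.lastTimestamp

# Block until the operator deployment reports Available again.
kubectl -n kuttl-test-modest-squid wait deploy/percona-server-mysql-operator \
  --for=condition=Available --timeout=120s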